Note. Boxplots display the interquartile range (IQR, center box), and the whiskers extend to 1.5 × IQR beyond the lower and upper hinges. The white point indicates the mean and the white center line indicates the median.


Note. All multilevel assumptions are tested as usual, including (e.g., for a random-slopes model with j within-person predictors):

\[ \begin{align} &\textrm{Level 1 Variance:}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \\ &\textrm{Level 2 Variance:}\ \begin{bmatrix} u_{0i}\\ \vdots\\ u_{ji}\end{bmatrix} \sim \mathcal{N} \begin{pmatrix} \begin{bmatrix} 0 \\ \vdots \\ 0 \end{bmatrix}, \begin{bmatrix} \tau_{00}^2 & & \\ \vdots & \ddots & \\ \tau_{j0} & \ldots & \tau_{jj}^2 \end{bmatrix} \end{pmatrix} \end{align} \]


Data Preparation

In an initial preparatory step, we import the data into the R project environment and prepare the variables for further processing and later analyses.

Data Import

The data were collected using two different survey tools. For the study with sojourners (Study 1: worker) we used the survey platform Qualtrics XM, whereas the studies with international students (Study 2: student) and the international medical professionals (Study 3: medical) were conducted using the survey framework formr. This means that the datasets had inconsistent file formats and naming conventions. For the Qualtrics study we pre-processed some variables to ease the import process (for the syntax files see the SPSS files in ‘data/S1_Workers/processed/cleaned’ and for the raw data files see ‘data/S1_Workers/raw’). For the two other studies, we import the raw csv files from their respective folders.

# Workers (Study 1)
# initial data cleaning was done in SPSS (syntax files are available in "")
# the four survey waves are collected in one named list
dtWorker <- list()
dtWorker$raw.pre <- read_spss("data/S1_Workers/processed/cleaned/MT - Pre-Measure - 06-15-2018.sav")
dtWorker$raw.post <- read_spss("data/S1_Workers/processed/cleaned/MT - Post-Measure - 06-15-2018.sav")
dtWorker$raw.morning <- read_spss("data/S1_Workers/processed/cleaned/MT - Morning - 06-15-2018.sav")
dtWorker$raw.afternoon <- read_spss("data/S1_Workers/processed/cleaned/MT - Afternoon - 06-15-2018.sav")

# students (Study 2)
# header = TRUE / sep = "," are read.csv() defaults, kept explicit here but
# spelled out (T is a reassignable alias for TRUE and should not be used)
dtStudents <- list(
  raw.pre = read.csv(file = "data/S2_Students/raw/AOTS_Pre.csv", header = TRUE, sep = ","),
  raw.post = read.csv(file = "data/S2_Students/raw/AOTS_Post.csv", header = TRUE, sep = ","),
  raw.daily = read.csv(file = "data/S2_Students/raw/AOTS_Daily.csv", header = TRUE, sep = ",")
)

# young medical professionals (Study 3): raw formr csv exports
dtMedical <- list()
dtMedical$raw.eligibility <- read.csv("data/S3_Medical/raw/AOTM_Eligibility.csv")
dtMedical$raw.pre <- read.csv("data/S3_Medical/raw/AOTM_Pre.csv")
dtMedical$raw.post <- read.csv("data/S3_Medical/raw/AOTM_Post.csv")
dtMedical$raw.daily <- read.csv("data/S3_Medical/raw/AOTM_Daily.csv")

Data Cleaning & Data Exclusions

Worker

For the sojourner sample, data were collected in four separate surveys: (1) the pre-measurement, (2) the daily morning survey, (3) the daily afternoon survey, as well as (4) a post-measurement. We combine the four individual surveys into one cohesive dataframe and drop superfluous variables that are not relevant to the analyses presented here. We then format the time and date variables and add person- and measurement indices (for easy and meaningful addressing of the data). We also exclude our own test data.
Note: All data preparation steps are saved in the ‘dtWorker’ list.

# important names (columns) to keep from the Morning and Afternoon surveys;
# numbered item runs are generated with paste0() -- the order of this vector
# is exactly the original column order and must not change
names.m <- c(
  # survey metadata (Qualtrics)
  "StartDate", "EndDate", "Finished", "Duration__in_seconds_",
  "RecordedDate", "ExternalReference", "Meta_Operating_System",
  # interaction occurrence, counts, and group composition
  "Contact_dum", "number", "time", "duration_1", "dyad.group", "gr_size",
  paste0("gr_type_", 1:16), "gr_type_17_TEXT",
  paste0("gr_context_", 1:12), "gr_context_13_TEXT", "gr_context_14_TEXT",
  "gr_dutchness",
  paste0("dyad_type_", 1:16), "dyad_type_17_TEXT",
  paste0("Context_", 1:12), "Context_13_TEXT", "Context_14_TEXT",
  # key motive and need fulfillment after interactions
  # (note: the "fulfillemt" typo exists in the source data and is kept)
  "keyMotive", "keymotive_fulfillemt_1", "keyMotive_Dutch_1",
  "autonomy_1", "competence_1", "relatedness_self_1", "relatedness_other_1",
  # interaction quality ratings
  "qualityAccidental_1", "qualityVoluntary_1", "qualityCooperative_1",
  "qualityDutchy_1", "quality_overall_1", "quality_meaning_1",
  "quality_star_1",
  # desired (but not realized) interactions
  "wantInt",
  paste0("desire_type_", 1:16), "desire_type_17_TEXT",
  paste0("desire_context_", 1:12), "desire_context_13_TEXT",
  "desire_context_14_TEXT",
  "Reason_nodesire",
  # no-interaction variants of the need measures
  "keyMotive_noInt", "keyMotive_noInt_fulf_1", "autonomy_NoInt_1",
  "competence_NoInt_1", "relatedness_1_NoInt_1",
  # attitudes and momentary well-being
  "thermometerDutch_1", "thermometerDutchInt_2", "ExWB_1",
  "alertness1", "calmness1", "valence1",
  "alertness2", "calmness2", "valence2",
  # non-Dutch interactions
  "inNonDutch", "NonDutchNum",
  paste0("NonDutchType_", 1:14), "NonDutchType_15_TEXT",
  # timing and location
  "date", "time.0", "LocationLatitude", "LocationLongitude"
)

# the afternoon survey additionally asked about the day's key interaction
names.a <- c(names.m, "keyInteraction_1", "keyInteractionTime")

# Create reduced data sets for morning and afternoon
dat.mo <- dtWorker$raw.morning[, names.m]
dat.mo$daytime <- "morning"

dat.af <- dtWorker$raw.afternoon[, names.a]
dat.af$daytime <- "afternoon"

# merge morning and afternoon measurements with indicator [+ clean up]
# rbind.fill() (plyr) stacks the two frames and pads the afternoon-only
# columns with NA for the morning rows
daily.dat <- rbind.fill(dat.mo, dat.af)
# drop one excluded participant (id 55951)
# NOTE(review): `!=` is NA for rows with missing ExternalReference, which
# turns those rows into all-NA rows here; they are only removed later via
# filter(!is.na(ExternalReference)) -- confirm this is intended
daily.dat <- daily.dat[daily.dat$ExternalReference != 55951, ]
dtWorker$daily <- daily.dat
rm(dat.mo, dat.af, names.m, names.a, daily.dat)


# names (columns) to keep from the pre-measurement survey;
# the order is exactly the original column order and must not change
names.pre <- c(
  "Finished",
  # demographics & living situation
  "age", "Gender", "Living", "roommate_1", "roommate_2", "roommate_3",
  "nationality", "SecondNationality", "timeNL_1",
  # reasons for the stay
  "Reason_2", "Reason_5", "Reason_7", "Reason_8_TEXT",
  "DutchLang",
  # occupation & education
  "occupation_1", "occupation_2", "occupation_3", "occupation_4",
  "occupation_7", "CurrentEducation_1", "education_level", "EduLang_2",
  "RUG_faculty", "Study.0", "association",
  # contact with Dutch people
  "DutchMeetNum", "DutchFriends_1",
  # acculturation & identity scales
  "assimilation", "separation", "integration", "marginalization",
  "VIA_heritage", "VIA_Dutch",
  "SSAS_surrounding", "SSAS_privat", "SSAS_public",
  # needs & well-being
  "autonomy", "relatedness", "competence", "anxiety", "swl",
  "alertness", "calmness", "valence",
  # meta data & identifier
  "date", "time", "City", "ZIP", "id"
)

# reduced data set for pre measurement
dat.pre.red <- dtWorker$raw.pre[, names.pre]

# merge with daily data [+ clean up]
# full outer join (all = T): keeps daily rows without pre-measurement data
# as well as pre-measurement participants without any daily data
df.pre <- merge(
  x = dtWorker$daily,
  y = dat.pre.red,
  by.x = "ExternalReference",
  by.y = "id",
  all = T
)
rm(names.pre)

# adjust duplicate names to fit to indicate daily or pre measurement
# merge() suffixes clashing column names with ".x" (daily side) and ".y"
# (pre side); rewrite those to the informative ".daily"/".pre"
# NOTE(review): the pattern matches any punctuation followed by "x"/"y"
# anywhere in a name -- confirm no variable name contains e.g. "_x"
names(df.pre) <- gsub("[[:punct:]]x", ".daily", names(df.pre))
names(df.pre) <- gsub("[[:punct:]]y", ".pre", names(df.pre))

# names (columns) to keep from the post-measurement survey;
# the order is exactly the original column order and must not change
names.post <- c(
  "ExternalReference",
  # acculturation & identity scales (repeated from pre)
  "assimilation", "separation", "integration", "marginalization",
  "VIA_heritage", "VIA_Dutch",
  # well-being & experiences
  "anxiety", "swl", "rosenberg", "social_support", "stress",
  "discrimination", "discrimination_month",
  # negative life events (past 1 / 6 / 12 months)
  "NLE_1month", "NLE_6month", "NLE_12month"
)

# reduced data set for post-measurement
dat.post.red <- dtWorker$raw.post[, names.post]

# merge post measurement with pre- and daily data
# (full outer join; `by =` replaces the redundant identical by.x/by.y pair,
# and TRUE is spelled out instead of the reassignable alias T)
df <- merge(
  x = df.pre,
  y = dat.post.red,
  by = "ExternalReference",
  all = TRUE
)

# adjust duplicate names to indicate pre or post
# (merge() suffixes clashing column names with ".x"/".y")
names(df) <- gsub("[[:punct:]]x", ".pre", names(df))
names(df) <- gsub("[[:punct:]]y", ".post", names(df))

# add to list
dtWorker$combined <- df

# create data frame with cleaned data:
# keep only rows where both the pre and the daily survey were finished and
# that can be tied to a participant id
df <- dtWorker$combined %>%
  filter(
    Finished.pre == 1,
    Finished.daily == 1,
    !is.na(ExternalReference)
  )

# add running number as measurement ID within participants
df$measureID <- rowidv(df, cols = c("ExternalReference"))

df <- df %>%
  mutate(
    # participant ID
    PID = as.numeric(factor(ExternalReference)),
    # time ID with t0 = 0 for meaningful intercept interpretations
    TID = measureID - 1,
    # extract date ("YYYY-MM-DD") and time ("HH:MM:SS") from the Qualtrics
    # timestamp string (could also be done via as.Date()/as.POSIXct())
    date = substr(StartDate, 1, 10),
    time = substr(StartDate, 12, 19),
    # all days as numeric for ordering
    daynum = as.numeric(factor(date)),
    # correctly identify which date the questionnaire is about:
    # morning surveys submitted before 12:00 and afternoon surveys submitted
    # before 19:00 refer to the *previous* day; explicit parentheses make
    # the &-binds-tighter-than-| precedence visible
    daycor = ifelse(
      (daytime == "morning" &
        period_to_seconds(hms(time)) < period_to_seconds(hms("12:00:00"))) |
        (daytime == "afternoon" &
          period_to_seconds(hms(time)) < period_to_seconds(hms("19:00:00"))),
      daynum - 1,
      daynum
    ),
    # zero-padded day + morning/afternoon letter ("a"/"b") combine into a
    # sortable session label, which is then turned into a numeric session id
    daycor.lead = sprintf("%02d", daycor),
    daytime.lt = ifelse(daytime == "morning", "a", "b"),
    day_time = paste(daycor.lead, daytime.lt, sep = "_"),
    session = as.numeric(factor(day_time)),
    SubTime = chron::times(time.0),
    time.daily = as.character(time.daily),
    # refer to the column directly instead of reaching back into the global
    # `df` (same values here, but robust inside mutate())
    PPDate = as.Date(date.daily),
    # missing interaction counts mean no interactions were reported
    number = replace_na(number, 0),
    NonDutchNum = replace_na(NonDutchNum, 0)
  )

dtWorker$clean <- df

# clean up
rm(df.pre, names.post, dat.post.red, dat.pre.red, df)

# Export reduced Data
# write.csv(dtWorker$clean, "data/processed/MT_clean-merged_07-05-2018.csv", row.names = F)
# save(dtWorker$clean, file = "data/processed/MT_clean-merged_07-05-2018.RData")

Student

For the student sample, data were, similarly, collected in three separate surveys: (1) the pre-measurement, (2) the daily survey sent out at lunch and dinner time, and (3) a post-measurement. We combine the three individual surveys into one large dataframe and drop superfluous variables that are not relevant to the analyses presented here. We exclude our own test data as well as one participant who entered the study twice (but gave different responses during the pre-measurement). We also reformat missing values and format core ID variables.
Note: All data preparation steps are saved in the ‘dtStudents’ list.

# our own test IDs
# formr session tokens generated while testing the surveys; all rows with
# these sessions are removed from the student data frames below
ownIDs <- c(
  "beautifulLionfishXXXR5rcgVBzGu8hPvOqrK8UBJBw4owvi9nfRFSFu3lMzYhE",
  "niceDogoXXXmB8JI5SFu78SF3DVof84mGUPPNUr14p2HYFTtp31a6D1OwAzM6F-K",
  "amusedQuailXXXmhuc_fpTp8vPkMwDH1BzjaH1d1kHSO1bsPEfsnaEYk4WeVBfPi",
  "juwGAbtXX0_1kmZtSVqKh3PGaHOICqUyU4iBkrT3nDsI_uifuD1gzKcZerxaM5FL"
)

# Prepare dfs for Cleaning
# Empty strings and literal "NA" strings (csv import artifacts) are recoded
# to real NA values first.
# NOTE(review): mutate_all() is superseded by across() in current dplyr;
# kept as-is here because the filter order below is behavior-relevant
df.pre <- dtStudents$raw.pre %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!is.na(ended)) %>% # remove all who did not finish
  # `.` refers to the data at this pipe step, i.e. AFTER the filters above
  filter(!e_mail %in% .$e_mail[duplicated(.$e_mail)]) %>% # remove all who did the pre questionnaire multiple times (b/c inconsistent ratings scales)
  filter(!session %in% ownIDs) %>% # remove our own test
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.post <- dtStudents$raw.post %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!is.na(session)) %>% # remove own test runs
  filter(!session %in% ownIDs) %>% # remove our own test
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  filter(!is.na(ended)) %>% # remove all who never finished
  # `.` again refers to the already-filtered data at this step
  filter(!session %in% .$session[duplicated(.$session)]) %>% # remove all duplicate sessions
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.daily <- dtStudents$raw.daily %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!session %in% ownIDs) %>% # remove our own test
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  filter(!is.na(ended)) %>% # remove all who never finished
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

# merge daily with pre (inner join: only sessions present in both surveys;
# clashing column names get ".daily"/".pre" suffixes)
dfPreDaily <- merge(
  x = df.daily,
  y = df.pre,
  by = "session",
  suffixes = c(".daily", ".pre"),
  all = FALSE
)

# merge the combined pre + daily data with the post measurement
# (inner join; note only columns still clashing at THIS stage receive the
# ".pre"/".post" suffixes -- columns already suffixed by the previous merge
# keep their ".daily"/".pre" names)
dfCombined <- merge(
  x = dfPreDaily,
  y = df.post,
  by = "session",
  suffixes = c(".pre", ".post"),
  all = FALSE
)

# add to list
dtStudents$clean <- dfCombined

# clean up workspace
rm(df.pre, df.daily, df.post, dfPreDaily, dfCombined, ownIDs)

Medical

For the medical professionals sample, data were, again, collected in three separate surveys: (1) the pre-measurement, (2) the daily survey sent out at lunch and dinner time, and (3) a post-measurement. We combine the three individual surveys into one large dataframe. We exclude our own test data. We also reformat missing values and format core ID variables.
Note: All data preparation steps are saved in the ‘dtMedical’ list.

# our own test IDs
# formr session tokens generated while testing the surveys; all rows with
# these sessions are removed from the medical data frames below
ownIDs <- c(
  "test_LeonieXXXSklxecPLW0-FBPM4796o3pUwUhAY5jb9KGw8jQsKxWmGpa1Jiy",
  "test_MaxXXXtOp_5dTNefIq0yKXtXt2IN6eEKxeHoPY9mlyvdsqPpLp1B0NGg4UL",
  "test_JannisXXXBsNqk62fOpX6chbd2tMWPptUdjjnhAqnQ3uBqckZ7gLIEoPlfZ",
  "quaintLeopardCatXXXAJ9cfSj-_SZLwNwMDxv_xv_iyr1Bg5YFLTlYdrjW0UXZY",
  "blue-eyedIndianElephantXXXLf5zPMpQCDGS3umFzIj-YVky7ivTItvvozW49m"
)

# Prepare dfs for Cleaning
# Empty strings and literal "NA" strings (csv import artifacts) are recoded
# to real NA values first.
# NOTE(review): mutate_all() is superseded by across() in current dplyr;
# kept as-is here because the filter order below is behavior-relevant
df.pre <- dtMedical$raw.pre %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!is.na(ended)) %>% # remove all who did not finish
  filter(!session %in% ownIDs) %>% # remove our own test
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.post <- dtMedical$raw.post %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!is.na(session)) %>% # remove own test runs
  filter(!session %in% ownIDs) %>% # remove our own test
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  #filter(!is.na(ended)) %>% # remove all who never finished [disabled because only relevant if data is missing]
  # `.` refers to the already-filtered data at this pipe step
  filter(!session %in% .$session[duplicated(.$session)]) %>% # remove all duplicate sessions
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.daily <- dtMedical$raw.daily %>%
  mutate_all(na_if, "") %>%
  mutate_all(na_if, "NA") %>%
  filter(!session %in% ownIDs) %>% # remove our own test
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  #filter(!is.na(ended)) %>% # remove all who never finished [disabled because only relevant if data is missing]
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

# merge daily with pre (inner join: only sessions present in both surveys;
# clashing column names get ".daily"/".pre" suffixes)
dfPreDaily <- merge(
  x = df.daily,
  y = df.pre,
  by = "session",
  suffixes = c(".daily", ".pre"),
  all = FALSE
)

# merge the combined pre + daily data with the post measurement
# (inner join; only columns still clashing at THIS stage receive the
# ".pre"/".post" suffixes)
dfCombined <- merge(
  x = dfPreDaily,
  y = df.post,
  by = "session",
  suffixes = c(".pre", ".post"),
  all = FALSE
)

# add to list
dtMedical$clean <- dfCombined

# clean up workspace
rm(df.pre, df.daily, df.post, dfPreDaily, dfCombined, ownIDs)

Calculate needed transformations

Worker

For the worker sample, the data transformation stage had three main aims:

  1. We first corrected time indicators within the surveys. In some cases participants completed their daily diary surveys for the afternoon after midnight. In these cases the measurement still is in reference to the previous day and is indicated in the corrected variable.
  2. We then created indices of scales. Some indices were multi-item scales while some indices combine equivalent measurement for different situational circumstances (e.g., competence perceptions after interactions and at measurement occasions without interactions).
  3. Finally, we calculated several basic participant summaries (averages across all measurement occasions).
# work on a copy of the cleaned worker data
df <- dtWorker$clean

# Time and Date Variables
# remove seconds from afternoon time
# NOTE(review): substring(x, 4, 8) keeps characters 4-8 of the time string
# before appending ":00" -- verify against the actual print format of
# `time.0` / `time.daily` (for a plain "HH:MM:SS" string this would drop
# the hour component)
df$SubTime[df$daytime == "afternoon"] <- paste0(substring(as.character(df$time.0[df$daytime == "afternoon"]), 4, 8), ":00")
# `!is.na(df$time.daily != "<NA>")` is equivalent to `!is.na(df$time.daily)`
# for character data (the comparison is NA exactly when the value is NA)
df$time.daily[df$daytime == "afternoon" &
  !is.na(df$time.daily != "<NA>")] <- paste0(substring(as.character(df$time.daily[df$daytime == "afternoon" &
  !is.na(df$time.daily != "<NA>")]), 4, 8), ":00")

# Correct morning / afternoon date where survey was collected the day after to indicate the correct date that was targeted
# surveys submitted before the cut-off (11:50 morning / 18:50 afternoon)
# refer to the previous day, so shift their date back by one
df$PPDate[df$SubTime < "11:50:00" &
  df$daytime == "morning"] <- df$PPDate[df$SubTime < "11:50:00" &
  df$daytime == "morning"] - 1
df$PPDate[df$SubTime < "18:50:00" &
  df$daytime == "afternoon"] <- df$PPDate[df$SubTime < "18:50:00" &
  df$daytime == "afternoon"] - 1

# Need scales
# The interaction and no-interaction variants are asked in mutually
# exclusive branches, so rowSums(..., na.rm = TRUE) collapses each pair
# into one column (TRUE spelled out instead of the reassignable alias T).
# NOTE(review): if BOTH columns are NA in a row, rowSums with na.rm = TRUE
# yields 0, not NA -- confirm every row has at least one of the measures
df$keyMotiveFulfilled <- rowSums(df[, c("keymotive_fulfillemt_1", "keyMotive_noInt_fulf_1")], na.rm = TRUE)
df$autonomy.daily.all <- rowSums(df[, c("autonomy_1", "autonomy_NoInt_1")], na.rm = TRUE)
df$competence.daily.all <- rowSums(df[, c("competence_1", "competence_NoInt_1")], na.rm = TRUE)
# cor(df$relatedness_other_1, df$relatedness_self_1,use="complete.obs")
df$relatedness.daily.all <- rowMeans(df[, c(
  "relatedness_other_1",
  "relatedness_self_1",
  "relatedness_1_NoInt_1"
)], na.rm = TRUE)

# correlation panel of the two interaction-relatedness items
pairs.panels.new(
  df[c("relatedness_self_1", "relatedness_other_1")],
  labels = c(
    "I shared information about myself.",
    "X shared information about themselves."
  )
)

# combined relatedness during interactions (mean of self- and other-item)
df$relatedness_1 <- rowMeans(df[, c("relatedness_other_1", "relatedness_self_1")], na.rm = TRUE)

# summarize by participant
# mutate() after group_by() repeats each person-level summary on every row
# of that person (the long format is kept on purpose)
between <- df %>%
  group_by(ExternalReference) %>%
  mutate(
    # interaction counts across all measurement occasions
    CtContactNL = sum(Contact_dum),
    CtContactNonNl = sum(inNonDutch),
    CtContactNLAll = sum(number),
    CtContactNonNlAll = sum(NonDutchNum),
    # person averages of the daily measures (TRUE instead of alias T)
    AvKeyNeed = mean(keyMotiveFulfilled, na.rm = TRUE),
    AvKeyNeedInt = mean(keymotive_fulfillemt_1, na.rm = TRUE),
    AvKeyNeedNoInt = mean(keyMotive_noInt_fulf_1, na.rm = TRUE),
    AvAutonomy = mean(autonomy.daily.all, na.rm = TRUE),
    AvCompetence = mean(competence.daily.all, na.rm = TRUE),
    AvRelatedness = mean(relatedness.daily.all, na.rm = TRUE),
    AvThermo = mean(thermometerDutch_1, na.rm = TRUE),
    AvWB = mean(ExWB_1, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  mutate(
    # centered (_c) / standardized (_z) person averages, computed AFTER
    # ungroup(), i.e. across the whole sample -- see warning() below
    CtContactNL_c = scale(CtContactNL, scale = FALSE),
    AvKeyNeedInt_c = scale(AvKeyNeedInt, scale = FALSE),
    AvKeyNeed_c = scale(AvKeyNeed, scale = FALSE),
    CtContactNL_z = scale(CtContactNL, scale = TRUE),
    AvKeyNeedInt_z = scale(AvKeyNeedInt, scale = TRUE),
    AvKeyNeed_z = scale(AvKeyNeed, scale = TRUE)
  )

warning(
  "some variable transformations (esp. _c and _z) might be across all participants (i.e., not within PP). See next step."
)

dtWorker$full <- between
rm(df, between)

# dataframe where interaction types are recoded
# as_factor() converts the labelled interaction dummies into factors
# NOTE(review): the "Yes" filter below assumes the resulting factor carries
# a "Yes" label -- confirm the value labels of Contact_dum / inNonDutch
workerInteractionType <- dtWorker$full %>%
  mutate(
    OutgroupInteraction = as_factor(Contact_dum),
    NonOutgroupInteraction = as_factor(inNonDutch)
  )

# Create variables centered and standardized within Participant
# i.e., divide into trait and state
# MlTraitState() is a project helper (defined elsewhere); presumably it
# decomposes each selected variable into a between-person (trait) and a
# within-person (state) component per id -- verify against its definition
workerWithinBetween <-
  MlTraitState(
    data = workerInteractionType,
    id = "PID",
    selection =
      c(
        "keyMotiveFulfilled",
        "competence.daily.all",
        "autonomy.daily.all",
        "relatedness.daily.all",
        "thermometerDutch_1",
        "keymotive_fulfillemt_1",
        "competence_1",
        "autonomy_1",
        "relatedness_1",
        "quality_overall_1",
        "OutgroupInteraction",
        "NonOutgroupInteraction"
      )
  )

# same decomposition, restricted to occasions with an outgroup interaction
workerOutWithinBetween <-
  MlTraitState(
    data = workerInteractionType %>% filter(OutgroupInteraction == "Yes"),
    id = "PID",
    selection =
      c(
        "keyMotiveFulfilled",
        "thermometerDutch_1",
        "keymotive_fulfillemt_1",
        "competence_1",
        "autonomy_1",
        "relatedness_1",
        "quality_overall_1"
      )
  )


# Between participants contact frequency
# per-person counts and averages (TRUE spelled out instead of alias T)
workerContactFreq <- dtWorker$full %>%
  group_by(PID) %>%
  summarise(
    n = n(),
    SumContactNL = sum(Contact_dum),
    PercContactNL = SumContactNL / n * 100,
    SumContactNLAll = sum(number),
    AvAttitude = mean(thermometerDutch_1, na.rm = TRUE)
  ) %>%
  mutate(
    # winsorize the count variables to limit the influence of extreme values
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll)
  )

# save cleaned data
# save(df.btw, file = "data/processed/df.btw.RData")
# write_sav(df.btw, "data/processed/MT_clean-merged_pre-post.sav")

# export data to Mplus
# df.mplus = remove_all_labels(select(df,
#                                     PID, session,
#                                     thermometerDutch_1, inNonDutch, Contact_dum,
#                                     keyMotiveFulfilled, autonomy.daily.all, competence.daily.all, relatedness.daily.all))
# names(df.mplus)= c("PID", "session", "att", "intin", "intout", "keymot", "aut", "comp", "rel")
# mplus = df.mplus[order(df.mplus$PID, df.mplus$session),]
# mplus.intcont = mplus[mplus$intout==1,]
# prepareMplusData(mplus.intcont, "data/processed/dynamic-subset-intonly.dat")

Student

For the student sample, the data transformation stage had five main aims:

  1. We first create person, survey type, and measurement ID variables.
  2. We then created indices of scales. Some indices were multi-item scales while some indices combine equivalent measurement for different situational circumstances (e.g., competence perceptions after interactions and at measurement occasions without interactions).
  3. We add information about the interaction partner to the beep during which a person was selected as an interaction partner.
  4. We cluster mean-center key variables within participants.
  5. Finally, we calculated several basic participant summaries (averages across all measurement occasions).
# work on a copy of the cleaned student data
df <- dtStudents$clean

# ID variables: person id, time ordering, and a numeric time id; the
# date_period levels follow their order of appearance in the raw daily data
df <- df %>%
  mutate(
    PID = as.numeric(factor(session)),
    TID = factor(date_period, levels = unique(dtStudents$raw.daily$date_period)),
    TIDnum = as.numeric(TID)
  ) %>%
  arrange(PID, TID)

# interaction type as factor plus dummy variables for Dutch / non-Dutch
df$interaction.f <- factor(
  df$Interaction,
  levels = c("no interaction", "Dutch", "Non-Dutch")
)
df$intNL <- ifelse(df$Interaction == "Dutch", 1, 0)
df$intNonNL <- ifelse(df$Interaction == "Non-Dutch", 1, 0)

# -------------------------------------------------------------------------------------------------------------
#                                       Combine Variables
# -------------------------------------------------------------------------------------------------------------
# Relatedness: correlation panel of the two interaction items
pairs.panels.new(
  df[c("RelatednessSelf", "RelatednessOther")],
  labels = c(
    "I shared information about myself.",
    "X shared information about themselves."
  )
)

# relatedness during interactions: mean of the self- and other-item;
# rowMeans() yields NaN when both items are missing, so recode those to NA
# (is.nan() instead of the implicit character comparison `== "NaN"`)
df$RelatednessInteraction <-
  rowMeans(df[c("RelatednessSelf", "RelatednessOther")], na.rm = TRUE)
df$RelatednessInteraction[is.nan(df$RelatednessInteraction)] <- NA
# overall relatedness: interaction and no-interaction variants combined
df$Relatedness <-
  rowMeans(df[, c("RelatednessInteraction", "RelatednessNoInteraction")],
    na.rm = TRUE
  )
# Pro-Sociality (4-item mean)
df$ProSo <-
  rowMeans(df[, c("ProSo1", "ProSo2", "ProSo3", "ProSo4")], na.rm = TRUE)
# Anti-Sociality (4-item mean)
df$AntiSo <-
  rowMeans(df[, c("AntiSo1", "AntiSo2", "AntiSo3", "AntiSo4")], na.rm = TRUE)


# -------------------------------------------------------------------------------------------------------------
#                                 Add Variables related to interaction partner
# -------------------------------------------------------------------------------------------------------------
# For one participant's data frame, carry the attributes of an interaction
# partner (closeness, gender, ethnicity, relationship) from the beep where
# the partner was first described (NewName / New* columns) to every later
# beep where the same partner was selected again (CC / CC2).
#
# inp: data frame with one participant's daily rows; must contain CC, NewCC,
#      NewName, NewCloseness, NewGender, NewEthnicity, NewRelationship
# returns: the prepared data frame with the looked-up columns closeness,
#      gender, ethnicity, and relationship appended
createIntPartDf <- function(inp) {
  # work on a plain data frame with character (not factor) id columns
  tmp <- data.frame(
    CC = as.character(inp$CC),
    NewCC = as.character(inp$NewCC),
    NewName = as.character(inp$NewName),
    NewCloseness = inp$NewCloseness,
    NewGender = inp$NewGender,
    NewEthnicity = as.character(inp$NewEthnicity),
    NewRelationship = as.character(inp$NewRelationship)
  )

  # CC2 holds the partner's name: for new partners ("SOMEONE ELSE" or code 1)
  # take the freshly entered NewName, otherwise keep the selected name
  tmp$CC2 <- recode(tmp$CC, "SOMEONE ELSE" = "NA")
  tmp$CC2 <-
    ifelse(
      tmp$CC == 1 |
        tmp$CC == "SOMEONE ELSE",
      as.character(tmp$NewName),
      as.character(tmp$CC2)
    )
  # strip a leading space so that re-entered names match the first entry
  tmp$CC2 <- gsub("^[[:space:]]", "", tmp$CC2)
  tmp$NewName <- gsub("^[[:space:]]", "", tmp$NewName)

  # open the variables that will be filled in the for-loop
  tmp$closeness <- rep(NA, nrow(tmp))
  tmp$gender <- rep(NA, nrow(tmp))
  tmp$ethnicity <- rep(NA, nrow(tmp))
  tmp$relationship <- rep(NA, nrow(tmp))

  # Look up each beep's partner attributes from the row where the partner was
  # first described: match CC2 against NewName and take the first non-missing
  # value (na.omit + [1]). seq_len() instead of 1:nrow() guards against
  # zero-row input (1:0 would iterate over c(1, 0)).
  for (i in seq_len(nrow(tmp))) {
    if (is.na(tmp$CC2[i])) {
      next
    }
    tmp$closeness[i] <-
      na.omit(tmp$NewCloseness[as.character(tmp$CC2[i]) == as.character(tmp$NewName)])[1]
    tmp$gender[i] <-
      na.omit(tmp$NewGender[as.character(tmp$CC2[i]) == as.character(tmp$NewName)])[1]
    # NOTE(review): ethnicity/relationship contain many NA values in the raw
    # data, so these lookups frequently return NA
    tmp$ethnicity[i] <-
      na.omit(as.character(tmp$NewEthnicity[as.character(tmp$CC2[i]) == as.character(tmp$NewName)]))[1]
    tmp$relationship[i] <-
      na.omit(as.character(tmp$NewRelationship[as.character(tmp$CC2[i]) == as.character(tmp$NewName)]))[1]
  }

  tmp
}

# split df per participant and run the partner-lookup function
PP <- split(df, df$PID)
PP <- lapply(PP, createIntPartDf)
rm(createIntPartDf)

# add the calculated variables back to df; the "_Calc" suffix marks them and
# avoids clashes with the original columns (paste0() instead of
# paste(..., sep = "")). Row order is preserved by cbind() because df is
# sorted by PID and split()/rbind follow that same order.
remergePP <- do.call(rbind.data.frame, PP)
colnames(remergePP) <- paste0(colnames(remergePP), "_Calc")
df <- cbind(df, remergePP)
rm(remergePP, PP)

# -------------------------------------------------------------------------------------------------------------
#                                 Center Relevant Variables
# -------------------------------------------------------------------------------------------------------------

# Cluster means (.cm = person average) and cluster-mean-centered scores
# (.cwc = within-person deviation from the person average).
df <- df %>%
  group_by(PID) %>%
  mutate(
    KeyNeedFullfillment.cm = mean(KeyNeedFullfillment, na.rm = TRUE),
    KeyNeedFullfillment.cwc = KeyNeedFullfillment - KeyNeedFullfillment.cm,
    closeness.cm = mean(closeness_Calc, na.rm = TRUE),
    closeness.cwc = closeness_Calc - closeness.cm
  ) %>%
  ungroup()

# store the fully transformed student data and drop the working copy
dtStudents$full <- df
rm(df)

# Between participants contact frequency: per-person counts of Dutch
# contacts, plus average attitudes and interaction quality
studentContactFreq <- dtStudents$full %>%
  group_by(PID) %>%
  summarise(
    n = n(),
    # beeps with Dutch contact, absolute and as percentage of all beeps
    SumContactNL = sum(InteractionDumDutch),
    PercContactNL = SumContactNL / n * 100,
    # total number of Dutch contacts, counted only on Dutch-contact beeps
    SumContactNLAll = sum(ContactNum[InteractionDumDutch == 1], na.rm = TRUE),
    AvAttitude = mean(AttitudesDutch, na.rm = TRUE),
    AvQuality = mean(quality_overall, na.rm = TRUE)
  ) %>%
  mutate(
    # winsorized counts to limit the influence of extreme values
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll)
  )

# dataframe where interaction types are recoded
studentInteractionType <- dtStudents$full %>%
  mutate(
    NonDutchContact = tidyr::replace_na(NonDutchContact, 2), # NA -> 2 ("no"), so the item can be recoded below
    NonDutchContact = NonDutchContact*-1+2 # recode (yes = 1 -> 1, no = 2 -> 0)
  ) %>%
  mutate(
    OutgroupInteraction = factor(
      InteractionDumDutch,
      levels = c(0, 1),
      labels = c("No", "Yes")
    ),
    # NOTE(review): if BOTH dummies are 1, the row sum is 2, which is outside
    # levels c(0, 1) and silently becomes NA -- confirm the two non-Dutch
    # questions are mutually exclusive
    NonOutgroupInteraction = factor(
      rowSums(select(., c(InteractionDumNonDutch, NonDutchContact))), # combine the two non-Dutch Q.,
      levels = c(0, 1),
      labels = c("No", "Yes")
    )
  )

# select a subset of IDs to display in plots:
# the 20 participants with the most measurement occasions (incl. ties)
studentPltIDs <-
  studentInteractionType %>%
  group_by(PID) %>%
  summarise(n = n()) %>%
  slice_max(n, n = 20) %>%
  pull(PID)

# same selection, but counting only beeps with an outgroup interaction
studentOutPltIDs <-
  studentInteractionType %>%
  filter(OutgroupInteraction == "Yes") %>%
  group_by(PID) %>%
  summarise(n = n()) %>%
  slice_max(n, n = 20) %>%
  pull(PID)

# Center within and between
# divide into trait and state
# MlTraitState() is a project helper (defined elsewhere); presumably it
# decomposes each selected variable into a between-person (trait) and a
# within-person (state) component per PID -- verify against its definition
studentWithinBetween <-
  MlTraitState(
    data = studentInteractionType,
    id = "PID",
    selection =
      c(
        "KeyNeedFullfillment",
        "Competence",
        "Autonomy",
        "Relatedness",
        "AttitudesDutch",
        "quality_overall",
        "OutgroupInteraction",
        "NonOutgroupInteraction"
      )
  )
# same decomposition, restricted to beeps with an outgroup interaction
studentOutWithinBetween <-
  MlTraitState(
    data = studentInteractionType %>% filter(OutgroupInteraction == "Yes"),
    id = "PID",
    selection =
      c(
        "KeyNeedFullfillment",
        "Competence",
        "Autonomy",
        "Relatedness",
        "AttitudesDutch",
        "quality_overall"
      )
  )

Medical

For the medical professional sample, the data transformation stage had five main aims:

  1. We first create person, survey type, and measurement ID variables.
  2. We then created indices of scales. Some indices were multi-item scales while some indices combine equivalent measurement for different situational circumstances (e.g., competence perceptions after interactions and at measurement occasions without interactions).
  3. We cluster mean-center key variables within participants.
  4. Finally, we calculated several basic participant summaries (averages across all measurement occasions).
# work on a copy of the cleaned medical data
df <- dtMedical$clean

# ID variables: person id, time ordering, and a numeric time id; the
# date_period levels follow their order of appearance in the raw daily data
df <- df %>%
  mutate(
    PID = as.numeric(factor(session)),
    TID = factor(date_period, levels = unique(dtMedical$raw.daily$date_period)),
    TIDnum = as.numeric(TID)
  ) %>%
  arrange(PID, TID)

# interaction type as factor plus dummy variables for Dutch / non-Dutch
df$interaction.f <- factor(
  df$Interaction,
  levels = c("no interaction", "Dutch", "Non-Dutch")
)
df$intNL <- ifelse(df$Interaction == "Dutch", 1, 0)
df$intNonNL <- ifelse(df$Interaction == "Non-Dutch", 1, 0)

df <- df %>%
  mutate(
    NonDutchContact = replace_na(NonDutchNum, 0), # NA count -> 0 (no non-Dutch partners reported)
    # dichotomize the count: more than one partner -> 1, else 0
    # NOTE(review): the analogous student code recodes a yes/no item here;
    # confirm that "> 1" (rather than ">= 1") is the intended cut-off
    NonDutchContact = ifelse(NonDutchContact > 1, 1, 0)
  ) %>%
  mutate(
    OutgroupInteraction = factor(
      InteractionDumDutch,
      levels = c(0, 1),
      labels = c("No", "Yes")
    ),
    # NOTE(review): if BOTH dummies are 1, the row sum is 2, which is outside
    # levels c(0, 1) and silently becomes NA -- confirm exclusivity
    NonOutgroupInteraction = factor(
      rowSums(select(., c(InteractionDumNonDutch, NonDutchContact)), na.rm = TRUE), # combine the two non-Dutch Q.,
      levels = c(0, 1),
      labels = c("No", "Yes")
    )
  )



# -------------------------------------------------------------------------------------------------------------
#                                       Combine Variables
# -------------------------------------------------------------------------------------------------------------
# Relatedness
# Visual check: agreement between the two relatedness items
pairs.panels.new(
  df[c("RelatednessSelf", "RelatednessOther")],
  labels = c(
    "I shared information about myself.",
    "X shared information about themselves."
  )
)

# Relatedness during interactions: item mean. rowMeans() returns NaN when
# both items are missing, so recode NaN to NA explicitly (was: a comparison
# against the string "NaN", which relied on implicit numeric-to-character
# coercion).
df$RelatednessInteraction <-
  rowMeans(df[c("RelatednessSelf", "RelatednessOther")], na.rm = TRUE)
df$RelatednessInteraction[is.nan(df$RelatednessInteraction)] <- NA

# Relatedness overall: mean of interaction / no-interaction measures
# (the NaN -> NA recode above lets all-missing rows propagate correctly)
df$Relatedness <-
  rowMeans(df[, c("RelatednessInteraction", "RelatednessNoInteraction")],
           na.rm = TRUE)
# Pro-Sociality: mean of the four pro-social behavior items
df$ProSo <-
  rowMeans(df[, c("ProSo1", "ProSo2", "ProSo3", "ProSo4")], na.rm = TRUE)
# Anti-Sociality: mean of the four anti-social behavior items
df$AntiSo <-
  rowMeans(df[, c("AntiSo1", "AntiSo2", "AntiSo3", "AntiSo4")], na.rm = TRUE)

# Allport's optimal contact conditions: inter-item relations across all
# measurement occasions (uncomment the filter to restrict to outgroup
# interactions).
df %>%
  #filter(OutgroupInteraction == "Yes") %>%
  select(
    InteractionContextEqualStatus,
    KeyNeedShared,
    InteractionContextCooperative,
    InteractionContextvoluntary
  ) %>%
  pairs.panels.new()

# Item-level descriptives for the same four condition items
df %>%
  #filter(OutgroupInteraction == "Yes") %>%
  select(
    InteractionContextEqualStatus,
    KeyNeedShared,
    InteractionContextCooperative,
    InteractionContextvoluntary
  ) %>%
  psych::describe(skew = FALSE, ranges = TRUE) %>%
  as.data.frame() %>%
  select(-vars) %>%
  kable(caption = "Descriptives of Allport's Condition items") %>%
  kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
Table 1: Descriptives of Allport’s Condition items
n mean sd min max range se
InteractionContextEqualStatus 3099 81.84 23.58 0 100 100 0.4236
KeyNeedShared 3110 84.90 18.74 0 100 100 0.3360
InteractionContextCooperative 3099 85.67 18.35 0 100 100 0.3296
InteractionContextvoluntary 3099 84.14 22.28 0 100 100 0.4002
# Item data frame for the reliability analyses of Allport's condition items.
# NOTE(review): the "iaWorker" prefix is a misnomer — this is the medical
# sample (df); the name is kept because later chunks reference it.
iaWorkerAllport <- 
  df %>%
  #filter(OutgroupInteraction == "Yes") %>%
  select(
    InteractionContextEqualStatus,
    KeyNeedShared,
    InteractionContextCooperative,
    InteractionContextvoluntary
  )

# Classical item analysis (difficulty, discrimination, alpha-if-deleted)
sjPlot::tab_itemscale(iaWorkerAllport)
Component 1
Missings Mean SD Skew Item Difficulty Item Discrimination α if deleted
24.54 % 81.84 23.58 -1.43 0.82 0.52 0.64
24.28 % 84.9 18.74 -1.78 0.85 0.42 0.69
24.54 % 85.67 18.35 -1.55 0.86 0.60 0.59
24.54 % 84.14 22.28 -1.7 0.84 0.47 0.67
Mean inter-item-correlation=0.386 · Cronbach’s α=0.709
# PCA-based grouping of items into components (here a single component
# emerges), then repeat the item analysis per component
pca <- parameters::principal_components(iaWorkerAllport)
factor.groups <- parameters::closest_component(pca)

sjPlot::tab_itemscale(iaWorkerAllport, factor.groups)
Component 1
Missings Mean SD Skew Item Difficulty Item Discrimination α if deleted
24.54 % 81.84 23.58 -1.43 0.82 0.52 0.64
24.28 % 84.9 18.74 -1.78 0.85 0.42 0.69
24.54 % 85.67 18.35 -1.55 0.86 0.60 0.59
24.54 % 84.14 22.28 -1.7 0.84 0.47 0.67
Mean inter-item-correlation=0.386 · Cronbach’s α=0.709
# Bootstrapped 95% CI for Cronbach's alpha (listwise-complete cases only)
ltm::cronbach.alpha(na.omit(iaWorkerAllport), CI = TRUE)
## 
## Cronbach's alpha for the 'na.omit(iaWorkerAllport)' data-set
## 
## Items: 4
## Sample units: 3099
## alpha: 0.709
## 
## Bootstrap 95% CI based on 1000 samples
##  2.5% 97.5% 
## 0.689 0.729
# Long-format item data for the multilevel reliability estimate:
# items (level 1) nested in beeps (level 2) nested in participants (level 3).
data <-
  df %>%
  select(
    PID,
    TIDnum,
    InteractionContextEqualStatus,
    KeyNeedShared,
    InteractionContextCooperative,
    InteractionContextvoluntary
  ) %>%
  drop_na() %>%
  melt(id.vars = c("PID", "TIDnum"))


# Multilevel (nested) alpha of the four condition items
horst::nestedAlpha(
  item.level.1 = "value",
  level.2      = "TIDnum",
  level.3      = "PID",
  data         = data
)
##  alpha 
## 0.7829
rm(data)

# Item analysis via the Scale package (cross-check of the psych/sjPlot
# results; the object is not referenced again in the visible code)
iaWorkerAllportScale <- 
  iaWorkerAllport %>%
  Scale::Scale() %>%
  Scale::ItemAnalysis()

# Unit-weighted composite of the four Allport's-conditions items
# (see psych::scoreItems for its missing-data handling); scores are on
# the items' 0-100 scale.
df$AllportsCondition <-
  scoreItems(
    keys = rep(1, 4),
    items = select(
      df,
      InteractionContextEqualStatus,
      KeyNeedShared,
      InteractionContextCooperative,
      InteractionContextvoluntary
    ),
    min = 0,
    max = 100
  )$scores

# Scale-level descriptives of the composite as a styled table
psych::describe(df$AllportsCondition, skew = TRUE) %>%
  as.data.frame() %>%
  mutate(vars = "Allport's Conditions Index") %>%
  kable(caption = "Allport's Conditions: Scale Descriptives", row.names = FALSE) %>%
  kable_styling("hover", full_width = FALSE, latex_options = "hold_position")
Table 1: Allport’s Conditions: Scale Descriptives
vars n mean sd median trimmed mad min max range skew kurtosis se
Allport’s Conditions Index 4107 86.49 13.88 93.75 88.6 9.266 0 100 100 -1.454 2.406 0.2165
# Inspect the distribution of the composite (left-skewed; skew = -1.45 above)
histogram(df$AllportsCondition)

# -------------------------------------------------------------------------------------------------------------
#                                 Add Variables related to interaction partner
# -------------------------------------------------------------------------------------------------------------
# create function for later lapply
# Build per-participant interaction-partner data: for repeated partners
# (referenced via the contact code CC) look up the descriptive variables
# (closeness, gender, ethnicity, relationship) recorded at the partner's
# first mention (NewName).
#
# @param inp data.frame with one participant's rows; must contain CC, NewCC,
#   NewName, NewCloseness, NewGender, NewEthnicity, NewRelationship.
# @return data.frame with the partner columns plus CC2 (resolved partner
#   name) and the looked-up closeness/gender/ethnicity/relationship per row.
createIntPartDf <- function(inp) {
  # prepare the dataframe so that we can loop over it below
  tmp <- data.frame(
    CC = as.character(inp$CC),
    NewCC = as.character(inp$NewCC),
    NewName = as.character(inp$NewName),
    NewCloseness = inp$NewCloseness,
    NewGender = inp$NewGender,
    NewEthnicity = as.character(inp$NewEthnicity),
    NewRelationship = as.character(inp$NewRelationship)
  )

  # NOTE(review): this recodes "SOMEONE ELSE" to the literal string "NA",
  # not a missing value (base replacement for dplyr::recode); those rows are
  # overwritten with NewName by the next statement anyway.
  tmp$CC2 <- ifelse(tmp$CC == "SOMEONE ELSE", "NA", tmp$CC)
  tmp$CC2 <-
    ifelse(
      tmp$CC == 1 | tmp$CC == "SOMEONE ELSE",
      as.character(tmp$NewName),
      as.character(tmp$CC2)
    )
  # strip a single leading whitespace character so names match across beeps
  tmp$CC2 <- gsub("^[[:space:]]", "", tmp$CC2)
  tmp$NewName <- gsub("^[[:space:]]", "", tmp$NewName)

  # pre-allocate the looked-up variables
  tmp$closeness <- rep(NA, nrow(tmp))
  tmp$gender <- rep(NA, nrow(tmp))
  tmp$ethnicity <- rep(NA, nrow(tmp))
  tmp$relationship <- rep(NA, nrow(tmp))

  # For every row, look up the partner's first recorded values: match the
  # resolved name (CC2) against all first mentions (NewName) and take the
  # first non-missing hit (na.omit + [1]).
  for (i in seq_len(nrow(tmp))) {
    if (is.na(tmp$CC2[i])) {
      next
    }
    hit <- tmp$CC2[i] == tmp$NewName # logical index; NA where NewName is NA
    tmp$closeness[i] <- na.omit(tmp$NewCloseness[hit])[1]
    tmp$gender[i] <- na.omit(tmp$NewGender[hit])[1]
    tmp$ethnicity[i] <- na.omit(tmp$NewEthnicity[hit])[1]
    tmp$relationship[i] <- na.omit(tmp$NewRelationship[hit])[1]
  }

  tmp
}

# split df per participants and run function
# Resolve interaction-partner variables per participant: split by PID,
# apply the lookup, then bind the per-participant results back together.
PP <- split(df, df$PID)
PP <- lapply(PP, createIntPartDf)
rm(createIntPartDf)

# Re-merge and suffix the derived columns with "_Calc" to avoid clashes
# with existing df columns, then append column-wise.
# NOTE(review): cbind alignment relies on df being sorted by PID (it is,
# via the arrange(PID, TID) above) because split() orders by PID level.
remergePP <- do.call(rbind.data.frame, PP)
colnames(remergePP) <- paste0(colnames(remergePP), "_Calc")
df <- cbind(df, remergePP)
rm(remergePP, PP)

# -------------------------------------------------------------------------------------------------------------
#                                 Center Relevant Variables
# -------------------------------------------------------------------------------------------------------------
# Divide core variables into within-person (state) and between-person
# (trait) components: once restricted to outgroup interactions, once for
# all measurement occasions.
medicalOutWithinBetween <-
  MlTraitState(
    data = df %>% filter(OutgroupInteraction == "Yes"),
    id = "PID",
    selection =
      c(
        "KeyNeedFulfillment",
        "Competence",
        "Autonomy",
        "Relatedness",
        "AllportsCondition",
        "AttitudesDutch",
        "qualityOverall"
      )
  )

# Same decomposition across all beeps, additionally including the two
# interaction-type indicators
medicalWithinBetween <-
  MlTraitState(
    data = df,
    id = "PID",
    selection =
      c(
        "KeyNeedFulfillment",
        "Competence",
        "Autonomy",
        "Relatedness",
        "AllportsCondition",
        "AttitudesDutch",
        "qualityOverall",
        "OutgroupInteraction",
        "NonOutgroupInteraction"
      )
  )

df <- # keep only for compatibility with old fragments
  MlTraitState(
    data = df,
    id = "PID",
    selection =
      c(
        "KeyNeedFulfillment",
        "Competence",
        "Autonomy",
        "Relatedness",
        "AllportsCondition",
        "AttitudesDutch",
        "qualityOverall"
      )
  )

# store the enriched/centered frame and drop the working copy
dtMedical$full <- df
rm(df)


# Between-participant summaries: contact frequency, average attitude, and
# average interaction quality per person
medicalContactFreq <-
  dtMedical$full %>%
  group_by(PID) %>%
  summarise(
    n = n(), # beeps per participant
    SumContactNL = sum(InteractionDumDutch, na.rm = TRUE), # beeps with Dutch contact
    PercContactNL = SumContactNL / n * 100, # ... as percentage of beeps
    SumContactNLAll = sum(ContactNum[InteractionDumDutch == 1], na.rm = TRUE), # total Dutch contacts
    AvAttitude = mean(AttitudesDutch, na.rm = TRUE),
    AvQuality = mean(qualityOverall, na.rm = TRUE)
  ) %>%
  mutate(
    # winsorized contact sums to dampen extreme frequencies
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll)
  )

# IDs of the 20 participants with the most measurements (used in plots)
medicalPltIDs <-
  dtMedical$full %>%
  group_by(PID) %>%
  summarise(n = n()) %>%
  slice_max(n, n = 20) %>%
  pull(PID)

# Same, counting only beeps with an outgroup interaction
medicalOutPltIDs <-
  dtMedical$full %>%
  filter(OutgroupInteraction == "Yes") %>%
  group_by(PID) %>%
  summarise(n = n()) %>%
  slice_max(n, n = 20) %>%
  pull(PID)

Worker Sample

The first sample we assess is the smaller sojourner study. We will sequentially test our main hypotheses for this study:

  1. Based on the most general understanding of the contact hypothesis, an increase in frequency and quality of contact should jointly account for changes in more favorable outgroup attitudes.
    1. Participants with more intergroup interactions should have more favorable outgroup attitudes.
    2. Outgroup attitudes should be higher after an intergroup interaction compared to a non-outgroup interaction.
    3. Participants with more intergroup interactions should have more favorable outgroup attitudes depending on the average interaction quality.
  2. Based on our proposal, intergroup interactions with higher situational core need fulfillment should predict more favorable outgroup attitudes due to more positive interaction quality perceptions.
    1. Outgroup attitudes should be more favorable after intergroup interactions with high key need fulfillment.
    2. Interaction quality should be perceived as more positive after intergroup interactions with higher key need fulfillment.
    3. The variance explained in outgroup attitudes by key need fulfillment should to a large extent be accounted for by interaction quality.
    4. The effect of key need fulfillment on outgroup attitudes should be specific to intergroup interactions and not be due to need fulfillment in general. Thus, the effect of key need fulfillment on outgroup attitudes should be stronger for intergroup interactions than for ingroup interactions.
    5. The effect of key need fulfillment on outgroup attitudes should persist even when taking other fundamental psychological needs into account. Thus, the effect of key need fulfillment on outgroup attitudes should remain strong even after controlling for autonomy, competence, and relatedness fulfillment during the interaction (cf., self-determination theory).

Data Description

Participants

# Combine the several occupation columns into one "/"-separated string per
# participant (empty cells dropped by the trailing trimws on "/")
workerOccupation <- 
  dtWorker$clean %>%
  select(PID, starts_with("occupation")) %>%
  mutate_all(as_factor) %>% 
  mutate_all(as.character) %>%
  replace(is.na(.), "") %>%
  unite("occupation", -PID, sep = "/") %>%
  mutate(occupation = trimws(occupation, which = "both", whitespace = "[/]")) %>%
  distinct

# Summarize stable participant characteristics (one row per person).
# NOTE(review): summarise() returns multiple rows per group here because
# age/gender/etc. are beep-level columns; distinct() collapses them after.
# Newer dplyr warns about multi-row summarise — reframe() is the modern form.
workerSampleInfo <- 
  dtWorker$clean %>%
  group_by(PID) %>%
  summarise(
    dailiesN = n(), 
    age = age,
    gender = as_factor(Gender),
    edu = as_factor(education_level),
    nationality = as_factor(nationality)
  ) %>%
  distinct

# add occupation variable (merge by the shared PID column)
workerSampleInfo <- merge(workerSampleInfo, workerOccupation)
rm(workerOccupation)

# Frequency table of participant characteristics (sorted by frequency)
workerSampleInfo %>%
  select(
    "Number of Measurements" = dailiesN,
    Age = age,
    Gender = gender,
    Education = edu,
    Nationality = nationality,
    Occupation = occupation
  ) %>%
  mutate(Nationality = as.character(Nationality)) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 231
Number of Measurements 59 (56, 62)
Age 23.0 (21.5, 26.5)
Gender
Female 19 (83%)
Male 4 (17%)
other 0 (0%)
Education
Bachelor 15 (75%)
Master 4 (20%)
PhD 1 (5.0%)
Unknown 3
Nationality
Germany 10 (43%)
Brazil 2 (8.7%)
Republic of Moldova 2 (8.7%)
Bulgaria 1 (4.3%)
China 1 (4.3%)
Czech Republic 1 (4.3%)
Eritrea 1 (4.3%)
Hungary 1 (4.3%)
Romania 1 (4.3%)
Slovakia 1 (4.3%)
Spain 1 (4.3%)
Ukraine 1 (4.3%)
Occupation
student 15 (65%)
part time work/student 3 (13%)
looking for work 2 (8.7%)
full time work 1 (4.3%)
internship 1 (4.3%)
student/looking for work 1 (4.3%)

1 Median (IQR); n (%)

For the first study, we recruited 23 migrants using the local paid participant pool and specifically targeted non-Dutch migrants to participate in our study. Participants reported on their interactions for at least 30 days with two daily measures (capturing the morning and afternoon). With this design, we aimed at getting 50-60 measurements per participant (\(M\) = 53.26, \(SD\) = 16.72, \(N\) = 1225). This is a common number of measurements found in experience sampling studies and offers sufficient power to model processes within and between participants. Participants were compensated for their participation with up to 34 Euros – two Euros each for the pre- and post-questionnaire as well as 50 Eurocents for every experience sampling measurement occasion. The sample consisted of relatively young, educated, and western migrants from the global north (\(M_{age}\) = 24.35, \(SD_{age}\) = 4.73, 19 women, 15 students). The sample accurately describes one of the largest groups of migrants in the region.

Interactions

# Survey duration descriptives. The duration distribution has a heavy right
# tail (see skew/kurtosis in the output), so robust statistics (winsorized
# mean/SD, 20% trimmed mean, median, MAD) are reported alongside mean/SD.
workerInteractions <- dtWorker$clean %>%
  dplyr::select(Duration__in_seconds_) %>%
  mutate_all(as.numeric)

workerInteractions %>%
  psych::describe(trim = .2) %>%
  as.data.frame() %>%
  mutate(
    vars = c("Duration [in seconds]"),
    na = nrow(dtWorker$clean) - n, # beeps without a recorded duration
    win.mean = sapply(workerInteractions, psych::winsor.mean, simplify = TRUE),
    win.sd = sapply(workerInteractions, psych::winsor.sd, simplify = TRUE)
  ) %>%
  dplyr::select(
    characteristic = vars, n, na,
    mean, `mean win` = win.mean, `mean trim` = trimmed, median,
    sd, `sd win` = win.sd, MAD = mad, min, max,
    skew, kurtosis
  ) %>%
  kbl(
    caption = "Study 1: Duration of Measurement in Seconds",
    format = "html",
    row.names = FALSE,
    digits = 2,
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  add_header_above(c(" " = 3, "Centrality" = 4, "Dispersion" = 5, "Distribution" = 2)) %>%
  footnote(general = "'na' indicates the number of measurements for which measurement duration is unknown.") %>%
  kable_classic(
    full_width = FALSE,
    lightable_options = "hover",
    html_font = "Cambria"
  )
Table 2: Study 1: Duration of Measurement in Seconds
Centrality
Dispersion
Distribution
characteristic n na mean mean win mean trim median sd sd win MAD min max skew kurtosis
Duration [in seconds] 1225 0 610.8 159.4 150.2 142 3226 63.3 78.58 49 59342 10.72 139.2
Note:
‘na’ indicates the number of measurements for which measurement duration is unknown.
# Frequency of outgroup / non-outgroup interactions across all beeps
workerInteractionType %>%
  select(OutgroupInteraction, NonOutgroupInteraction) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 1,2251
Did you meet a Dutch person this morning? (In person interaction for at least 10 minutes) 387 (32%)
Did you meet non-Dutch people this morning? (in person for at least 10 minutes) 778 (64%)

1 n (%)

Variable distributions

# Multilevel correlations and descriptives of the core variables.
# MlCorMat returns a square correlation block (between-person correlations
# above the diagonal, within-person below — see footnote) followed by
# descriptive rows (means, SDs, ICCs).
workerMlCor <-
  MlCorMat(
    data = workerInteractionType,
    id = "PID",
    selection = c("keyMotiveFulfilled", "competence.daily.all", "autonomy.daily.all", "relatedness.daily.all", "quality_overall_1", "thermometerDutch_1"),
    labels = c("Core Need", "Competence", "Autonomy", "Relatedness", "Quality", "Attitudes NL")
  ) 

workerMlCor %>%
  kable(
    .,
    caption = "Worker: Multilevel Core Variable Descriptives",
    format = "html",
    #booktabs = TRUE,
    linesep = linesep(c(ncol(.))),
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  # rows 1..ncol are the (square) correlation block; the remaining rows
  # hold the descriptives
  pack_rows("Correlations", 1, ncol(workerMlCor)) %>%
  pack_rows("Descriptives", ncol(workerMlCor)+1, nrow(workerMlCor)) %>%
  footnote(
    general = c(
      "Upper triangle: Between-person correlations;",
      "Lower triangle: Within-person correlations;",
      "*** p < .001, ** p < .01,  * p < .05"
    )
  ) %>%
  kable_classic(full_width = F, 
                lightable_options = "hover", 
                html_font = "Cambria")
Table 3: Worker: Multilevel Core Variable Descriptives
Core Need Competence Autonomy Relatedness Quality Attitudes NL
Correlations
Core Need 0.82*** 0.60*** 0.33 0.52** -0.03
Competence 0.36*** 0.89*** 0.26 0.39 -0.23
Autonomy 0.28*** 0.22*** 0.31 0.57** 0.02
Relatedness 0.50*** 0.39*** 0.37*** -0.07 0.14
Quality 0.17*** 0.44*** 0.27*** 0.26*** 0.50*
Attitudes NL 0.24*** 0.36*** 0.24*** 0.37*** 0.52***
Descriptives
Grand Mean 27.95 12.10 22.17 5.29 24.10 71.49
Between SD 14.68 13.72 12.09 14.59 9.50 12.91
Within SD 20.83 20.89 15.15 23.29 18.01 8.11
ICC(1) 0.29 0.28 0.38 0.28 0.18 0.70
ICC(2) 0.96 0.95 0.97 0.95 0.79 0.99
Note:
Upper triangle: Between-person correlations;
Lower triangle: Within-person correlations;
*** p < .001, ** p < .01, * p < .05

Contact Hypothesis

We test the most general contact hypothesis in two steps. First, we assess whether more intergroup interactions are related to more positive outgroup attitudes. Second, we test whether a potential positive effect on outgroup attitudes depends on the interaction quality (jointly with the number of interactions).

Interaction Frequency and Attitudes

To test the impact of the overall number of interactions, we hope to find that there is a significant relationship between the number of interactions a participant had and the average outgroup attitude.

\[\begin{equation} \tag{1} r_{ContactFrequency, OutgroupAttitudes} \neq 0 \end{equation}\]

# Correlation panel: contact-frequency measures vs. average attitudes
# (fixed label typo "Numer" -> "Number")
pairs.panels.new(
  workerContactFreq %>% select(SumContactNL, SumContactNLAll, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)",
    "Sum:\nNumber of Outgroup Contacts (NL)",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

# Same panel with the interaction sums winsorized
pairs.panels.new(
  workerContactFreq %>% select(WinSumContactNL, WinSumContactNLAll, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]",
    "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

We find that neither the number of interactions nor the number of measurement beeps with an interaction are significantly related with the average outgroup attitudes. This is to say that within our data, participants with more outgroup interactions did not have significantly more positive outgroup attitudes. This might be due to the aggregation within the participants or the small sample size of between participant data. Nonetheless, the aggregate data does not support the notion that simply having more interactions with an outgroup results in more positive outgroup attitudes.

Outgroup Attitudes by Interaction Type

In a next step we take into account that having an interaction with an outgroup member, does not happen in a social vacuum. Participants who indicated that they had an interaction with an outgroup member include measurement occasions during which someone either only had an interaction with an outgroup member as well as times during which a person also had interaction(s) with a non-Dutch person. Inversely, participants who indicated that they did not have an interaction with a Dutch person might either have had no interaction at all or had an interaction with a non-Dutch person. We, thus consider all possible combinations and their independent influences on outgroup attitudes.

We first assess the impact of the different interaction types across all measurement points (lumping all beeps together).

\[\begin{equation} \tag{2} Attitude = OutgroupInteraction + NonOutgroupInteraction \end{equation}\]

# Per-beep interaction-type labels for plotting.
# NOTE(review): OutgroupInteraction is compared against "Yes"/"No" but
# NonOutgroupInteraction against lowercase "yes"/"no" — if both factors use
# capitalized labels (as in the other samples), the lowercase comparisons
# produce NA in InteractionType; verify the worker factor labels.
workerAttInteractionType <- workerInteractionType %>%
  select(
    PID,
    OutgroupInteraction,
    NonOutgroupInteraction,
    Attitude = thermometerDutch_1
  ) %>%
  mutate(InteractionType = paste(
    ifelse(OutgroupInteraction == "Yes", "Out", ifelse(OutgroupInteraction == "No", "NoOut", NA)),
    ifelse(NonOutgroupInteraction == "yes", "In", ifelse(NonOutgroupInteraction == "no", "NoIn", NA)),
    sep = ", "
  ))

# Violin plot of attitudes by interaction-type group: white point = mean,
# white center line = median (cf. the boxplot note at the top of the doc)
ggplot(workerAttInteractionType, aes(y=Attitude, x=OutgroupInteraction, group = interaction(OutgroupInteraction, NonOutgroupInteraction), fill=NonOutgroupInteraction))+
  geom_violin(trim = T, width=.8, position = position_dodge(0.8)) +
  # narrow black boxplot inside each violin (outliers suppressed)
  geom_boxplot(width=0.04,
               position = position_dodge(.8),
               outlier.colour = NULL, 
               outlier.shape = NA, 
               notch=F, fill="black", 
               lwd=1, color="black") + 
  # geom_jitter(width = 0.15,
  #            shape=18,
  #            alpha=.2, size=1) +
  # white point = group mean
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun`); kept
  # here for compatibility with the project's ggplot2 version — confirm.
  stat_summary(fun.y=mean, 
               geom="point", 
               shape=16, 
               size=1, 
               position = position_dodge(0.8),
               colour="white") +
  #stat_summary(fun.data="mean_sdl",  
  #             fun.args = list(mult=1), 
  #             geom="pointrange", 
  #             color = "red", 
  #             width=.2)+
  # white crossbar = group median
  stat_summary(geom = "crossbar", 
               width=0.04, 
               fatten=0, 
               position = position_dodge(0.8),
               color="white", 
               fun.data = function(x){ return(c(y=median(x), 
                                                ymin=median(x), 
                                                ymax=median(x))) })+
  ylab("Outgroup Attitudes")+
  xlab("Outgroup Interaction") +
  ggtitle("Violin Plot:\nOutgroup Attitudes by Interaction Type")+
  # label the feeling-thermometer anchors on the y axis
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c(
      "Very cold or unfavorable feelings 0°",
      "Quite cold and unfavorable feelings 15°",
      "Fairly cold and unfavorable feelings 30°",
      "A bit cold and unfavorable feelings 40°",
      "No feeling at all 50°",
      "A bit warm and favorable feelings 60°",
      "Fairly warm and favorable feelings 70° ",
      "Quite warm and favorable feelings 85° ",
      "Very warm and favorable feelings 100° "
    )
  ) +
  #coord_flip()+
  scale_fill_brewer(palette = "Pastel2", name = "Non-Outgroup Interaction")+
  theme_Publication()+
  theme(strip.background =element_rect(fill="black", color="black"),
        strip.text = element_text(colour = 'white', face="bold"))#,

        #panel.border = element_rect(color="grey"),
        #legend.position="none")



# Cell means and normal-approximation 95% CIs of attitudes for every
# combination of outgroup (Contact_dum) and non-outgroup (inNonDutch)
# interaction.
workerContactType <- dtWorker$full %>%
  group_by(
    Contact_dum,
    inNonDutch
  ) %>%
  summarise(
    n = n(),
    AttitudeM = mean(thermometerDutch_1, na.rm = TRUE),
    AttitudeSD = sd(thermometerDutch_1, na.rm = TRUE),
    AttitudeSE = AttitudeSD / sqrt(n),
    AttitudeLwr = AttitudeM - 1.96 * AttitudeSE,
    AttitudeUpr = AttitudeM + 1.96 * AttitudeSE
  ) %>%
  ungroup() %>%
  # compact "Out/In" cell label for plotting
  mutate(InteractionType = paste(
    ifelse(Contact_dum == 1, "Out", "NoOut"),
    ifelse(inNonDutch == 1, "In", "NoIn"),
    sep = ", "
  ))

# plot bar chart (alternative with less information about actual data)
workerAttInteractionTypeBar <- ggplot(
  workerContactType,
  aes(
    y = AttitudeM,
    x = as_factor(Contact_dum),
    fill = as_factor(inNonDutch)
  )
) +
  geom_bar(
    stat = "identity",
    color = "black",
    position = position_dodge()
  ) +
  # NOTE(review): ymin is set to the mean (not AttitudeLwr), so only the
  # upper half of the 95% CI is drawn on top of each bar — confirm intended.
  geom_errorbar(aes(ymin = AttitudeM, ymax = AttitudeUpr),
    width = .2,
    position = position_dodge(.9)
  ) +
  labs(
    fill = "Non-Outgroup Interaction",
    x = "Outgroup Interaction",
    y = "Outgroup Attitude",
    title = "Outgroup Attitudes by Interaction Type [95% CI]"
  ) +
  scale_fill_grey(
    start = 0.2,
    end = 0.8
  ) +
  # label the feeling-thermometer anchors on the y axis
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c(
      "Very cold or unfavorable feelings 0°",
      "Quite cold and unfavorable feelings 15°",
      "Fairly cold and unfavorable feelings 30°",
      "A bit cold and unfavorable feelings 40°",
      "No feeling at all 50°",
      "A bit warm and favorable feelings 60°",
      "Fairly warm and favorable feelings 70° ",
      "Quite warm and favorable feelings 85° ",
      "Very warm and favorable feelings 100° "
    )
  ) +
  theme_Publication()
# Container collecting all models fitted on the worker sample
mdlWorker <- list()

# OLS regression of attitudes on the two interaction-type factors and their
# interaction (all beeps pooled; nesting in persons is ignored here)
mdlWorker$lmAttInt <-
  lm(
    thermometerDutch_1 ~ OutgroupInteraction * NonOutgroupInteraction,
    data = workerInteractionType
  )

# Coefficient table with 95% confidence intervals
summ(
  mdlWorker$lmAttInt,
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 1225
Dependent variable thermometerDutch_1
Type OLS linear regression
F(3,1221) 4.867
0.012
Adj. R² 0.009
Est. 2.5% 97.5% t val. p
(Intercept) 69.507 67.899 71.116 84.796 0.000
OutgroupInteraction 3.620 0.378 6.862 2.191 0.029
NonOutgroupInteraction -0.513 -2.593 1.566 -0.484 0.628
OutgroupInteraction:NonOutgroupInteraction -0.098 -4.021 3.826 -0.049 0.961
Standard errors: OLS; Continuous predictors are mean-centered.

We find that while controlling for interactions with non-Dutch people, outgroup attitudes were significantly higher when participants had an interaction with a Dutch person. The effect is relatively small (3.62 points on a 0–100 scale). More importantly, however, this analysis lumps all ESM beeps from every participant together and ignores that the data are nested within participants.

Interaction Frequency and Interaction Quality

In a next step we check whether the effect of outgroup interactions, in part, depends on the quality during the interactions. In this step we aggregate the within person data to remove any dependency issues. Additionally, because we can only assess interaction quality when there is an interaction, it is difficult to assess this with the interaction dummy as a within person predictor. Instead, we will use an aggregate measure of interaction quality and average interaction quality to consider the two predictors jointly. Such an aggregation in essence mirrors the general ‘past recall’ approach that dominates the field. The only difference being that we aggregate data that participants recorded shortly after the actual interactions, whereas in most recall studies this aggregation is done mentally by the participant.

\[\begin{equation} \tag{3} Attitude = ContactFreq \times AverageContactQual \end{equation}\]

# Aggregate each participant's beeps (keyed by the Qualtrics
# ExternalReference ID) into contact-frequency and average quality/attitude
# summaries; add winsorized and grand-mean-centered versions for the
# moderated regression below.
workerAvFreQual <- dtWorker$full %>%
  group_by(ExternalReference) %>%
  summarise(
    n = n(),
    SumContactNL = sum(Contact_dum), # beeps with outgroup contact (no na.rm — presumably Contact_dum has no NAs; verify)
    PercContactNL = SumContactNL / n * 100,
    SumContactNLAll = sum(number), # total number of outgroup contacts
    AvAttitude = mean(thermometerDutch_1, na.rm = TRUE),
    AvQuality = mean(quality_overall_1, na.rm = TRUE)
  ) %>%
  mutate(
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll),
    # grand-mean centering (between-person level, since data are aggregated)
    SumContactNL_c = SumContactNL - mean(SumContactNL, na.rm = TRUE),
    SumContactNLAll_c = SumContactNLAll - mean(SumContactNLAll, na.rm = TRUE),
    AvAttitude_c = AvAttitude - mean(AvAttitude, na.rm = TRUE),
    AvQuality_c = AvQuality - mean(AvQuality, na.rm = TRUE)
  )

# Correlation panel: contact frequency, average quality, average attitudes
# (fixed label typo "Numer" -> "Number")
pairs.panels.new(
  workerAvFreQual %>% select(SumContactNL, SumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)",
    "Sum:\nNumber of Outgroup Contacts (NL)",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

# Same panel with the interaction sums winsorized
pairs.panels.new(
  workerAvFreQual %>% select(WinSumContactNL, WinSumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]",
    "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

Within the data, we find a medium sized correlation between the participants’ Average Interaction Quality and their Average Outgroup Attitudes. Thus within our data participants with a higher quality outgroup interactions also held more positive attitudes towards that group. However, the frequency of intergroup interactions had no meaningful correlation with either the average interaction quality or average outgroup attitudes.

# OLS: average attitudes regressed on centered contact frequency, centered
# average quality, and their product (between-person, aggregated data)
mdlWorker$lmAttFreqQualX <-
  lm(
    AvAttitude ~ SumContactNL_c * AvQuality_c,
    data = workerAvFreQual
  )

# Coefficient table with 95% confidence intervals
summ(
  mdlWorker$lmAttFreqQualX,
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 21 (2 missing obs. deleted)
Dependent variable AvAttitude
Type OLS linear regression
F(3,17) 6.663
0.540
Adj. R² 0.459
Est. 2.5% 97.5% t val. p
(Intercept) 70.930 66.519 75.341 33.927 0.000
SumContactNL_c 0.269 -0.150 0.688 1.354 0.193
AvQuality_c 0.765 0.288 1.242 3.385 0.004
SumContactNL_c:AvQuality_c -0.049 -0.085 -0.014 -2.954 0.009
Standard errors: OLS; Continuous predictors are mean-centered.
# Partial eta squared effect sizes for the moderated regression
mdlWorker$lmAttFreqQualXEta <-
  effectsize::eta_squared(mdlWorker$lmAttFreqQualX, partial = TRUE)

# Simple-slopes plot: effect of quality on attitudes at levels of contact
# frequency, with 95% interval and partial residuals
interactions::interact_plot(
  mdlWorker$lmAttFreqQualX,
  pred = AvQuality_c,
  modx = SumContactNL_c,
  interval = TRUE,
  partial.residuals = TRUE
)

# Johnson-Neyman: range of contact frequency where the quality slope is
# significant at alpha = .05
interactions::johnson_neyman(mdlWorker$lmAttFreqQualX,
                             pred = AvQuality_c,
                             modx = SumContactNL_c,
                             alpha = .05)
## JOHNSON-NEYMAN INTERVAL 
## 
## When SumContactNL_c is OUTSIDE the interval [6.83, 58.60], the slope of AvQuality_c is p < .05.
## 
## Note: The range of observed values of SumContactNL_c is [-14.83, 34.17]

We find that interaction quality is significantly related to higher outgroup attitudes (albeit with a small effect size). We also find that in our sample, with an increasing number of interactions, the positive effect of interaction quality becomes weaker. However, it should be noted that this is based on data aggregating all within-participant nuances and includes only the data of 21 people.

Multi-Level Regression

We then proceeded with a multilevel analysis, which also acknowledges that the measurements are nested within participants, but makes full use of the within participant variation.

Unconditional model

We start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model).

\[\begin{equation} \tag{4} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (null) random-intercept model (Eq. 4), fit twice:
# with lme4 (for Satterthwaite p-values via summ) and with nlme
# (for later anova()/VarCorr() comparisons).
mdlWorker$lmerAttNull <-
  lme4::lmer(thermometerDutch_1 ~ 1 + (1 | PID),
    data = dtWorker$full
  ) # use optim if it does not converge

mdlWorker$lmeAttNull <-
  lme(
    thermometerDutch_1 ~ 1,
    random = ~ 1 | PID,
    data = dtWorker$full,
    # FIX: optimizer was misspelled "nlmimb"; nlme expects "nlminb"
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(lmerWorkerAttNull) #or with the lme function
# Reported ICC here is the proportion of variance between participants
summ(mdlWorker$lmerAttNull, digits = 3, center = TRUE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8805.880
BIC 8821.213
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.698
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 71.338 2.695 26.466 22.053 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.797
Residual 8.425
Grouping Variables
Group # groups ICC
PID 23 0.698
# generate 95% parametric bootstrap CIs (and save them as a csv-file):
# write.csv(confint(lmer(thermometerDutch_1~1 + (1|PID),data=dtWorker$full),
#                  method="boot",nsim=1000,
#                  parallel = "multicore", ncpus = 4, seed = 42),
#          "output/tables/ML-Null-CI.csv")

# Save variances
mdlWorker$varAttNull <- 
  VarCorr(mdlWorker$lmeAttNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlWorker$tauAttNull <- 
  as.numeric(mdlWorker$varAttNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlWorker$sigmaAttNull <- 
  as.numeric(mdlWorker$varAttNull[2])
# The ICC estimate (between/between+within) is:
# (reuse the tau/sigma values saved above instead of re-extracting them)
mdlWorker$IccAttNull <-
  mdlWorker$tauAttNull / (mdlWorker$tauAttNull + mdlWorker$sigmaAttNull)
# ... and the same ICC expressed as a percentage:
mdlWorker$IccPercAttNull <-
  mdlWorker$IccAttNull * 100

We then compare the random intercept model to a model without a random intercept (i.e., without levels at all).

# Create and save Model
# Single-level null model (no random effects) to compare against the
# random-intercept model via a likelihood-ratio test below.
mdlWorker$glsAttNull  <-
  gls(thermometerDutch_1 ~ 1,
      data = dtWorker$full,
      # FIX: optimizer was misspelled "nlmimb"; nlme expects "nlminb"
      control = list(opt = "nlminb"))

# calculate Deviances manually:
# Deviance = -2 * log-likelihood (basis of the LR test below)
mdlWorker$DevianceGlsNull <- logLik(mdlWorker$glsAttNull) * -2
mdlWorker$DevianceMlNull <- logLik(mdlWorker$lmeAttNull) * -2

# Compare the two null models:
# LR test of single-level (gls) vs. random-intercept (lme) null model,
# rendered as an HTML table.
anova(mdlWorker$glsAttNull,
      mdlWorker$lmeAttNull) %>% 
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; tibble::rownames_to_column() is
  # the drop-in replacement
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 4: Worker: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
glsAttNull 1 2 10133 10144 -5065
lmeAttNull 2 3 8806 8821 -4400 1 vs 2 1329.406 < .001

Comparing the two empty models, we find that there is indeed a significant amount of variance explained by including a random intercept.

To assess the variances within and between participants we look at the ICC \(\tau_{00}^2 / (\tau_{00}^2 + \sigma^2)\): The ratio of the between-cluster variance to the total variance is called the Intraclass Correlation. It tells you the proportion of the total variance in Y that is accounted for by the clustering. (In this case the clustering means clustering observations per participant).

We find that an estimated 69.76% of the variation in Feeling Thermometer scores is explained by between participant differences (clustering by PID). This is to say that 69.76% of the variance in any individual report of Attitudes towards the Dutch can be explained by the properties of the individual who provided the rating. And we find that including ‘participant’ as a predictor adds significantly to the model.

random intercept with predictors

To this random intercept model we now add the two types of interactions possible at each measurement point as contemporaneous predictors of outgroup attitudes. That is: We check whether within participants having an outgroup interaction (or a non-outgroup interaction) is associated with more positive outgroup attitudes at the same measurement point.

\[\begin{equation} \tag{5} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model with the two interaction types as
# contemporaneous level-1 predictors (Eq. 5); nlme fit kept for anova()
mdlWorker$lmeInterceptAttType <-
  lme(
    thermometerDutch_1 ~ OutgroupInteraction + NonOutgroupInteraction,
    random =  ~ 1 | PID,
    data = workerInteractionType
  )

# Get summary with p-values (Satterthwaite's method)
# (the equivalent lme4 fit is created inside the call and saved)
summ(
  mdlWorker$lmerInterceptAttType <- lmer(
    thermometerDutch_1 ~ OutgroupInteraction + NonOutgroupInteraction + (1 | PID),
    data = workerInteractionType
  ),
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8788.218
BIC 8813.771
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.703
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.343 65.002 75.683 25.814 22.897 0.000
OutgroupInteraction 2.477 1.364 3.589 4.365 1204.135 0.000
NonOutgroupInteraction 0.427 -0.683 1.538 0.754 1204.911 0.451
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.811
Residual 8.362
Grouping Variables
Group # groups ICC
PID 23 0.701
# Fast (Wald) 95% CIs for the fixed effects
mdlWorker$lmerInterceptAttTypeCI <- 
  confint(method = "Wald", mdlWorker$lmerInterceptAttType)

# Compare new model to previous step
# LR test: null random-intercept model vs. model with the two
# interaction-type predictors, rendered as an HTML table.
anova(mdlWorker$lmeAttNull, 
      mdlWorker$lmeInterceptAttType) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; tibble::rownames_to_column() is
  # the drop-in replacement
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 5: Worker: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 8806 8821 -4400
lmeInterceptAttType 2 5 8788 8814 -4389 1 vs 2 21.663 < .001
# Save variances
mdlWorker$varInterceptAttType <- 
  lme4::VarCorr(mdlWorker$lmeInterceptAttType)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
# (proportion reduced relative to the null model, index [1] = intercept)
mdlWorker$varBtwInterceptAttType <-
  1 - (as.numeric(mdlWorker$varInterceptAttType[1]) / as.numeric(mdlWorker$varAttNull[1]))
mdlWorker$varBtwPercInterceptAttType <-
  (1 - (as.numeric(mdlWorker$varInterceptAttType[1]) / as.numeric(mdlWorker$varAttNull[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
# (index [2] = residual variance)
mdlWorker$varWithinInterceptAttType <-
  1 - (as.numeric(mdlWorker$varInterceptAttType[2]) / as.numeric(mdlWorker$varAttNull[2]))
mdlWorker$varWithinPercInterceptAttType <-
  (1 - (as.numeric(mdlWorker$varInterceptAttType[2]) / as.numeric(mdlWorker$varAttNull[2]))) * 100

We find that a random intercept model with the two interaction types as predictors explains significantly more variance then an empty random intercept model. Looking at the individual coefficients, we find that having an outgroup interaction is indeed associated with significantly more positive outgroup attitudes, while having an interaction with a non-Dutch person does not significantly relate to more positive or negative attitudes towards the Dutch.

TL;DR: Interaction with Dutch is great predictor, interactions with non-Dutch is not.

random slope

In a next step, we check whether further letting the effect of the different interaction types vary between participants explains additional variance in outgroup attitudes (i.e., random slopes).

\[\begin{equation} \tag{6} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random slopes for both interaction types (Eq. 6); nlme fit for anova()
mdlWorker$lmeSlopesAttType <- lme(
  thermometerDutch_1 ~
    OutgroupInteraction + NonOutgroupInteraction,
  random = ~ 1 + OutgroupInteraction + NonOutgroupInteraction | PID,
  control = lmeControl(opt = "optim"),
  data = workerInteractionType
)

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlWorker$lmerSlopesAttType <- lmer(
    thermometerDutch_1 ~
      OutgroupInteraction + NonOutgroupInteraction +
      (1 + OutgroupInteraction + NonOutgroupInteraction | PID),
    data = workerInteractionType
  ), 
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8793.695
BIC 8844.802
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.710
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.371 65.001 75.741 25.682 21.938 0.000
OutgroupInteraction 2.572 1.079 4.065 3.376 19.291 0.003
NonOutgroupInteraction 0.434 -0.693 1.562 0.755 205.804 0.451
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.883
PID OutgroupInteraction 2.248
PID NonOutgroupInteraction 0.424
Residual 8.307
Grouping Variables
Group # groups ICC
PID 23 0.706
# Simple (i.e., fast) CIs
# Wald CIs (no bootstrapping) for the fixed effects
mdlWorker$lmerSlopesAttTypeCI <- 
  confint(method = "Wald", mdlWorker$lmerSlopesAttType)

# Compare new model to previous step
# LR test: random-intercept model vs. model adding random slopes,
# rendered as an HTML table.
anova(mdlWorker$lmeInterceptAttType, 
      mdlWorker$lmeSlopesAttType) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; tibble::rownames_to_column() is
  # the drop-in replacement
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 6: Worker: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeInterceptAttType 1 5 8788 8814 -4389
lmeSlopesAttType 2 10 8794 8845 -4387 1 vs 2 4.165 0.526
# Save variances
mdlWorker$varSlopesAttType <- 
  lme4::VarCorr(mdlWorker$lmeSlopesAttType)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
# (relative to the random-intercept model; index [1] = intercept)
mdlWorker$varBtwSlopesAttType <- 
  1 - (as.numeric(mdlWorker$varSlopesAttType[1]) / as.numeric(mdlWorker$varInterceptAttType[1]))
mdlWorker$varBtwPercSlopesAttType <- 
  (1 - (as.numeric(mdlWorker$varSlopesAttType[1]) / as.numeric(mdlWorker$varInterceptAttType[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
# (index [2] = residual variance)
mdlWorker$varWithinSlopesAttType <- 
  1 - (as.numeric(mdlWorker$varSlopesAttType[2]) / as.numeric(mdlWorker$varInterceptAttType[2]))
mdlWorker$varWithinPercSlopesAttType <- 
  (1 - (as.numeric(mdlWorker$varSlopesAttType[2]) / as.numeric(mdlWorker$varInterceptAttType[2]))) * 100

# Assumption Checks:
# sjPlot diagnostic plots for the random-slopes model, arranged in a
# grid (element [[2]] is a list indexed by grouping factor, here PID)
mdlWorker$diagSlopesAttType <-
  sjPlot::plot_model(mdlWorker$lmerSlopesAttType, type = "diag")
grid.arrange(
  mdlWorker$diagSlopesAttType[[1]],
  mdlWorker$diagSlopesAttType[[2]]$`PID`,
  mdlWorker$diagSlopesAttType[[3]],
  mdlWorker$diagSlopesAttType[[4]]
)

# Plot prediction model
# Fixed-effects-only predictions (re.form = NA) alongside the observed
# attitude trajectories, per participant
mdlWorker$predSlopesAttType <- 
  workerInteractionType %>%
  select(thermometerDutch_1, session, PID) %>% 
  mutate(measure = predict(mdlWorker$lmeSlopesAttType,
                           workerInteractionType,
                           re.form = NA
                           )
         )

# Blue line = model prediction, black line = observed; one panel per PID
# (outer parentheses print the plot while also saving it)
(
  mdlWorker$predPltSlopesAttType <-
    ggplot(data = mdlWorker$predSlopesAttType, aes(x = session, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Write the plot to disk for the manuscript
ggsave(
  filename = "Figures/Worker_PredictionPlot_SlopesAttType.png",
  mdlWorker$predPltSlopesAttType,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does not add significantly beyond the random intercept model. This is unusual because this might indicate that the effect is very consistent across participants. However, this might also be the case due to a small number of participants, or other measurement issues.

TL;DR: Random slopes don’t add much for this super simple model.

Need Fulfillment

The main proposal of our article is that the success of an outgroup interaction might be explained by whether or not the interaction fulfilled the person’s core situational need. This should, in turn, be due to a higher perceived interaction quality. We will thus sequentially test whether the fulfillment of the core need during an interaction is (1) related to more positive outgroup attitudes, (2) higher perceived interaction quality, and (3) whether the variance explained by the core need is assumed by the perceived interaction quality if considered jointly.

Need fulfillment and Attitudes

In a first step we, thus, test the relationship between outgroup attitudes and the fulfillment of the core situational need during the interaction.

Unconditional model

We again start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model). Note that this unconditional model differs from the empty model created earlier because for this set of analyses we will only consider the subsample of measurement points during which an outgroup interaction was reported. This is necessary because measurements of needs during the interaction and perceived interaction quality are only meaningful within an interaction context.

# see how large our outgroup interaction subset actually is
# Cross-tabulation of outgroup x non-outgroup interaction reports
tbl_cross(
  workerInteractionType,
  row = OutgroupInteraction,
  col = NonOutgroupInteraction,
  percent = "row"
)
Characteristic Did you meet non-Dutch people this morning? (in person for at least 10 minutes) Total
no yes
Did you meet a Dutch person this morning? (In person interaction for at least 10 minutes)
No 337 (40%) 501 (60%) 838 (100%)
Yes 110 (28%) 277 (72%) 387 (100%)
Total 447 (36%) 778 (64%) 1,225 (100%)
# create outgroup interaction subset
# Only measurement points where an outgroup interaction was reported,
# since need fulfillment/quality are only meaningful within interactions
workerOutgroupInteraction <- workerInteractionType %>%
  filter(OutgroupInteraction == "Yes")

# create empty list to organize models
# (Att = attitude outcome models, Qlt = interaction quality models)
mdlWorkerOut <- 
  list(
    Att = list(),
    Qlt = list()
  )

\[\begin{equation} \tag{7} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (null) random-intercept model on the outgroup-interaction
# subset (Eq. 7); lme4 fit for summ(), nlme fit for anova()/VarCorr().
mdlWorkerOut$Att$lmerNull <-
  lme4::lmer(thermometerDutch_1 ~ 1 + (1 | PID), 
             data = workerOutgroupInteraction) # use optim if it does not converge
mdlWorkerOut$Att$lmeNull <-
  lme(
    thermometerDutch_1 ~ 1,
    random = ~ 1 | PID,
    data = workerOutgroupInteraction,
    # FIX: optimizer was misspelled "nlmimb"; nlme expects "nlminb"
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.ML.r) #or with the lme function
summ(mdlWorkerOut$Att$lmerNull, digits = 3, center = TRUE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2863.460
BIC 2875.336
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.684
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 72.550 2.910 24.933 19.198 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.069
Residual 8.883
Grouping Variables
Group # groups ICC
PID 21 0.684
# generate 95% parametric bootstrap CIs (and save them as a csv-file):
# write.csv(confint(lmer(thermometerDutch_1~1 + (1|PID),data=dtWorker$full),
#                  method="boot",nsim=1000,
#                  parallel = "multicore", ncpus = 4, seed = 42),
#          "output/tables/ML-Null-CI.csv")

# Save variances
mdlWorkerOut$Att$varNull <- 
  VarCorr(mdlWorkerOut$Att$lmeNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlWorkerOut$Att$tauNull <- 
  as.numeric(mdlWorkerOut$Att$varNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlWorkerOut$Att$sigmaNull <- 
  as.numeric(mdlWorkerOut$Att$varNull[2])
# The ICC estimate (between/between+within) is:
# (reuse the tau/sigma values saved above instead of re-extracting them)
mdlWorkerOut$Att$IccNull <-
  mdlWorkerOut$Att$tauNull /
  (mdlWorkerOut$Att$tauNull + mdlWorkerOut$Att$sigmaNull)
# ... and the same ICC expressed as a percentage:
mdlWorkerOut$Att$IccPercNull <-
  mdlWorkerOut$Att$IccNull * 100

random intercept with level one predictors

We thus add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{8} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model with within-person-centered (cwc) core need
# fulfillment as level-1 predictor (Eq. 8); nlme fit kept for anova()
mdlWorkerOut$Att$lmeInterceptCore <-
  lme(
    thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc,
    random = ~ 1 | PID,
    data = workerOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# (center = FALSE because the predictor is already centered within person)
summ(
  mdlWorkerOut$Att$lmerInterceptCore <- lmer(thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc + (1 | PID), 
       data = workerOutWithinBetween),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2842.458
BIC 2858.292
Pseudo-R² (fixed effects) 0.022
Pseudo-R² (total) 0.708
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.545 66.834 78.256 24.895 19.249 0.000
keymotive_fulfillemt_1_cwc 0.149 0.095 0.203 5.423 364.280 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.107
Residual 8.556
Grouping Variables
Group # groups ICC
PID 21 0.701
# Generate 95% CIs
# Wald CIs (fast, no bootstrapping) for the fixed effects
mdlWorkerOut$Att$lmerInterceptCoreCI <- 
  confint(method = "Wald", mdlWorkerOut$Att$lmerInterceptCore)

# Compare new model to previous step
# LR test: null model vs. model with core need fulfillment, as HTML table
anova(mdlWorkerOut$Att$lmeNull, 
      mdlWorkerOut$Att$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 7: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut\(Att\)lmeNull 1 3 2863 2875 -1429
mdlWorkerOut\(Att\)lmeInterceptCore 2 4 2842 2858 -1417 1 vs 2 23.002 < .001
# Save variances
mdlWorkerOut$Att$varInterceptCore <-
  lme4::VarCorr(mdlWorkerOut$Att$lmeInterceptCore)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
# (relative to the null model; index [1] = intercept variance)
mdlWorkerOut$Att$varBtwInterceptCore <- 
  1 - (as.numeric(mdlWorkerOut$Att$varInterceptCore[1]) / as.numeric(mdlWorkerOut$Att$varNull[1]))
mdlWorkerOut$Att$varBtwPercInterceptCore <- 
  (1 - (as.numeric(mdlWorkerOut$Att$varInterceptCore[1]) / as.numeric(mdlWorkerOut$Att$varNull[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
# (index [2] = residual variance)
mdlWorkerOut$Att$varWithinInterceptCore <-
  1 - (as.numeric(mdlWorkerOut$Att$varInterceptCore[2]) / as.numeric(mdlWorkerOut$Att$varNull[2]))
mdlWorkerOut$Att$varWithinPercInterceptCore <-
  (1 - (as.numeric(mdlWorkerOut$Att$varInterceptCore[2]) / as.numeric(mdlWorkerOut$Att$varNull[2]))) * 100

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model. Looking at the individual coefficients, we find that the situational core need relates significantly to outgroup attitudes. The core need has little explained variance between participants (compared to the null model: Variance Explained = 1 – (Var with Predictor/Var without Predictor); -0.59%). The variance explained within participants is small to medium (7.21%).

random slope

In a next step, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{9} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random slope for core need fulfillment (Eq. 9); nlme fit for anova()
mdlWorkerOut$Att$lmeSlopesCore <-
  lme(
    thermometerDutch_1 ~
      keymotive_fulfillemt_1_cwc,
    random = ~ 1 + keymotive_fulfillemt_1_cwc | PID,
    control = lmeControl(opt = "optim"),
    data = workerOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlWorkerOut$Att$lmerSlopesCore <- lmer(
    thermometerDutch_1 ~
      keymotive_fulfillemt_1_cwc +
      (1 + keymotive_fulfillemt_1_cwc | PID),
    data = workerOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2815.675
BIC 2839.425
Pseudo-R² (fixed effects) 0.030
Pseudo-R² (total) 0.752
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.535 66.825 78.244 24.901 19.503 0.000
keymotive_fulfillemt_1_cwc 0.175 0.058 0.292 2.936 18.230 0.009
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.136
PID keymotive_fulfillemt_1_cwc 0.214
Residual 7.944
Grouping Variables
Group # groups ICC
PID 21 0.732
# all variables standardized within PPT
# Same random-slope model on the within-person z-scored (zwc) variables,
# giving a standardized-effects check of the result above
summ(
  mdlWorkerOut$Att$lmerSlopesCoreZ <- lmer(
    thermometerDutch_1_zwc ~
      keymotive_fulfillemt_1_zwc +
      (1 + keymotive_fulfillemt_1_zwc | PID),
    data = workerOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 382
Dependent variable thermometerDutch_1_zwc
Type Mixed effects linear regression
AIC 1048.418
BIC 1072.091
Pseudo-R² (fixed effects) 0.087
Pseudo-R² (total) 0.105
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 0.000 -0.093 0.093 0.000 361.980 1.000
keymotive_fulfillemt_1_zwc 0.295 0.179 0.412 4.963 13.517 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 0.000
PID keymotive_fulfillemt_1_zwc 0.136
Residual 0.924
Grouping Variables
Group # groups ICC
PID 20 0.000
# standardized coefficients
stdCoef.merMod(mdlWorkerOut$Att$lmerSlopesCore)
##                            stdcoef   stdse
## (Intercept)                 0.0000 0.00000
## keymotive_fulfillemt_1_cwc  0.2073 0.07061
# 95%CIs
# Wald CIs (fast, no bootstrapping) for the fixed effects
mdlWorkerOut$Att$lmerSlopesCoreCI <- 
  confint(method = "Wald", mdlWorkerOut$Att$lmerSlopesCore)

# Attempts at R^2
# Several R^2 definitions for mixed models are reported side by side,
# since no single agreed-upon measure exists
r2mlm::r2mlm(mdlWorkerOut$Att$lmerSlopesCore, bargraph = TRUE)

## $Decompositions
##                 total              within             between
## fixed, within   0.0302011330572777 0.0934734539936625 NA     
## fixed, between  0                  NA                 0      
## slope variation 0.0453370936790715 0.140319726818854  NA     
## mean variation  0.676901496981963  NA                 1      
## sigma2          0.247560276281688  0.766206819187484  NA     
## 
## $R2s
##     total              within             between
## f1  0.0302011330572777 0.0934734539936625 NA     
## f2  0                  NA                 0      
## v   0.0453370936790715 0.140319726818854  NA     
## m   0.676901496981963  NA                 1      
## f   0.0302011330572777 NA                 NA     
## fv  0.0755382267363492 0.233793180812516  NA     
## fvm 0.752439723718312  NA                 NA
mitml::multilevelR2(mdlWorkerOut$Att$lmerSlopesCore)
##      RB1      RB2       SB      MVP 
##  0.20023 -0.01023  0.05628  0.03020
performance::r2(mdlWorkerOut$Att$lmerSlopesCore)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.752
##      Marginal R2: 0.030
performance::model_performance(mdlWorkerOut$Att$lmerSlopesCore)
## # Indices of model performance
## 
## AIC      |      BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## ---------------------------------------------------------------------
## 2815.675 | 2839.425 |      0.752 |      0.030 | 0.745 | 7.590 | 7.944
# Fit indices for all three models (null, intercept, slope) side by side
performance::compare_performance(mdlWorkerOut$Att$lmerNull, 
                                 mdlWorkerOut$Att$lmerInterceptCore, 
                                 mdlWorkerOut$Att$lmerSlopesCore)
## # Comparison of Model Performance Indices
## 
## Name    |   Model |      AIC |      BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## -----------------------------------------------------------------------------------------
## Model 1 | lmerMod | 2863.460 | 2875.336 |      0.684 |      0.000 | 0.684 | 8.648 | 8.883
## Model 2 | lmerMod | 2842.458 | 2858.292 |      0.708 |      0.022 | 0.701 | 8.318 | 8.556
## Model 3 | lmerMod | 2815.675 | 2839.425 |      0.752 |      0.030 | 0.745 | 7.590 | 7.944
# Compare new model to previous step
# Sequential LR tests: null -> +core need -> +random slope, as HTML table
anova(mdlWorkerOut$Att$lmeNull, 
      mdlWorkerOut$Att$lmeInterceptCore, 
      mdlWorkerOut$Att$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 8: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut\(Att\)lmeNull 1 3 2863 2875 -1429
mdlWorkerOut\(Att\)lmeInterceptCore 2 4 2842 2858 -1417 1 vs 2 23.002 < .001
mdlWorkerOut\(Att\)lmeSlopesCore 3 6 2816 2839 -1402 2 vs 3 30.784 < .001
# Save variances
mdlWorkerOut$Att$varSlopesCore <- 
  lme4::VarCorr(mdlWorkerOut$Att$lmeSlopesCore)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
# (relative to the random-intercept model; index [1] = intercept)
mdlWorkerOut$Att$varBtwSlopesCore <-
  1 - (as.numeric(mdlWorkerOut$Att$varSlopesCore[1]) / as.numeric(mdlWorkerOut$Att$varInterceptCore[1]))
mdlWorkerOut$Att$varBtwPercSlopesCore <-
  (1 - (as.numeric(mdlWorkerOut$Att$varSlopesCore[1]) / as.numeric(mdlWorkerOut$Att$varInterceptCore[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
# (index [2] = residual variance)
mdlWorkerOut$Att$varWithinSlopesCore <-
  1 - (as.numeric(mdlWorkerOut$Att$varSlopesCore[2]) / as.numeric(mdlWorkerOut$Att$varInterceptCore[2]))
mdlWorkerOut$Att$varWithinPercSlopesCore <-
  (1 - (as.numeric(mdlWorkerOut$Att$varSlopesCore[2]) / as.numeric(mdlWorkerOut$Att$varInterceptCore[2]))) * 100

# Assumption Checks:
# sjPlot diagnostic plots for the random-slope model, arranged in a grid
# (element [[2]] is a list indexed by grouping factor, here PID)
mdlWorkerOut$Att$diagSlopesCore <- 
  sjPlot::plot_model(mdlWorkerOut$Att$lmerSlopesCore, type = "diag")
grid.arrange(
  mdlWorkerOut$Att$diagSlopesCore[[1]],
  mdlWorkerOut$Att$diagSlopesCore[[2]]$`PID`,
  mdlWorkerOut$Att$diagSlopesCore[[3]],
  mdlWorkerOut$Att$diagSlopesCore[[4]]
)

# Plot prediction model
# Fixed-effects-only predictions (re.form = NA) alongside the observed
# attitude trajectories, per participant
mdlWorkerOut$Att$predSlopesCore <- 
  workerOutWithinBetween %>%
  select(thermometerDutch_1, session, PID) %>% 
  mutate(measure = predict(mdlWorkerOut$Att$lmeSlopesCore,
                           workerOutWithinBetween,
                           re.form = NA
                           )
         )

# Blue line = model prediction, black line = observed; one panel per PID
# (outer parentheses print the plot while also saving it)
(
  mdlWorkerOut$Att$predPltSlopesCore <-
    ggplot(data = mdlWorkerOut$Att$predSlopesCore, aes(x = session, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Write the plot to disk for the manuscript
ggsave(
  filename = "Figures/WorkerOut_PredictionPlot_SlopesAttCore.png",
  mdlWorkerOut$Att$predPltSlopesCore,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that the core need remains a strong predictor (even when letting the influence vary between participants).

TL;DR: The random slope adds significantly to the prediction model.

Need fulfillment and Interaction Quality

Based on the assertion that the relationship between core need fulfillment and outgroup attitudes is explained by a higher perceived interaction quality, the core need fulfillment should also significantly predict perceived interaction quality.

Unconditional model

Given that we now have the perceived interaction quality as our outcome variable of interest we again begin with an unconditional model (i.e., empty random intercept model), to see whether there is enough variance to explain within the participants. Similarly to before this is again done within the subsample of measurements during which an outgroup interaction was reported.

\[\begin{equation} \tag{10} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (null) random-intercept model with perceived interaction
# quality as outcome (Eq. 10); lme4 fit for summ(), nlme fit for anova().
mdlWorkerOut$Qlt$lmerNull <-
  lme4::lmer(quality_overall_1 ~ 1 + (1 | PID), 
             data = workerOutWithinBetween) # use optim if it does not converge

mdlWorkerOut$Qlt$lmeNull <-
  lme(
    quality_overall_1 ~ 1,
    random = ~ 1 | PID,
    data = workerOutWithinBetween,
    # FIX: optimizer was misspelled "nlmimb"; nlme expects "nlminb"
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.Qual.ML.r) #or with the lme function
summ(mdlWorkerOut$Qlt$lmerNull, digits = 3)
Observations 387
Dependent variable quality_overall_1
Type Mixed effects linear regression
AIC 3347.534
BIC 3359.410
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.183
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 24.285 2.090 11.619 20.156 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 8.286
Residual 17.511
Grouping Variables
Group # groups ICC
PID 21 0.183
# Save variances
mdlWorkerOut$Qlt$varNull <- 
  VarCorr(mdlWorkerOut$Qlt$lmeNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlWorkerOut$Qlt$tauNull <- 
  as.numeric(mdlWorkerOut$Qlt$varNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlWorkerOut$Qlt$sigmaNull <- 
  as.numeric(mdlWorkerOut$Qlt$varNull[2])
# The ICC estimate (between/between+within) is:
# (reuse the tau/sigma values saved above instead of re-extracting them)
mdlWorkerOut$Qlt$IccNull <-
  mdlWorkerOut$Qlt$tauNull /
  (mdlWorkerOut$Qlt$tauNull + mdlWorkerOut$Qlt$sigmaNull)
# ... and the same ICC expressed as a percentage:
mdlWorkerOut$Qlt$IccPercNull <-
  mdlWorkerOut$Qlt$IccNull * 100

We again find a reasonable amount of variance within the participants.

random intercept with level one predictor

We again add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{11} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Random-intercept model with the level-1 core-need predictor
# (nlme fit, used for the likelihood-ratio comparisons below).
mdlWorkerOut$Qlt$lmeInterceptCore <-
  lme(quality_overall_1 ~ keymotive_fulfillemt_1_cwc,
      random = ~ 1 | PID,
      data = workerOutWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values.
mdlWorkerOut$Qlt$lmerInterceptCore <-
  lmer(quality_overall_1 ~ keymotive_fulfillemt_1_cwc + (1 | PID),
       data = workerOutWithinBetween)
summ(mdlWorkerOut$Qlt$lmerInterceptCore,
     confint = TRUE, digits = 3, center = FALSE)
Observations 387
Dependent variable quality_overall_1
Type Mixed effects linear regression
AIC 3293.508
BIC 3309.342
Pseudo-R² (fixed effects) 0.117
Pseudo-R² (total) 0.306
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 24.281 20.197 28.365 11.653 20.090 0.000
keymotive_fulfillemt_1_cwc 0.418 0.317 0.520 8.070 366.218 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 8.434
Residual 16.158
Grouping Variables
Group # groups ICC
PID 21 0.214
# 95% CI (Wald approximation) for the lme4 fit
mdlWorkerOut$Qlt$lmerInterceptCoreCI <- 
  confint(mdlWorkerOut$Qlt$lmerInterceptCore, method = "Wald")

# Compare new model to previous step via likelihood-ratio test
anova(mdlWorkerOut$Qlt$lmeNull, 
      mdlWorkerOut$Qlt$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 9: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Qlt$lmeNull 1 3 3348 3359 -1671
mdlWorkerOut$Qlt$lmeInterceptCore 2 4 3294 3309 -1643 1 vs 2 56.026 < .001
# Save variances
# Use nlme::VarCorr explicitly: the model is an nlme::lme fit, so relying on
# the lme4 generic (as before) works only via S3 dispatch while nlme is
# attached — fragile and inconsistent with the null-model extraction above.
mdlWorkerOut$Qlt$varInterceptCore <-
  nlme::VarCorr(mdlWorkerOut$Qlt$lmeInterceptCore)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 - (Var with Predictor/Var without Predictor)
mdlWorkerOut$Qlt$varBtwInterceptCore <- 
  1 - (as.numeric(mdlWorkerOut$Qlt$varInterceptCore[1]) / as.numeric(mdlWorkerOut$Qlt$varNull[1]))
mdlWorkerOut$Qlt$varBtwPercInterceptCore <- 
  mdlWorkerOut$Qlt$varBtwInterceptCore * 100  # reuse instead of recomputing
# and the estimate of within-group (or residual variance) explained is:
mdlWorkerOut$Qlt$varWithinInterceptCore <-
  1 - (as.numeric(mdlWorkerOut$Qlt$varInterceptCore[2]) / as.numeric(mdlWorkerOut$Qlt$varNull[2]))
mdlWorkerOut$Qlt$varWithinPercInterceptCore <-
  mdlWorkerOut$Qlt$varWithinInterceptCore * 100  # reuse instead of recomputing

The predictor again adds a significant amount of explained variance beyond the empty model and, looking at the slope coefficient, we find that the situational core need fulfillment relates significantly to perceived interaction quality. The core need explained essentially no variance between participants (-3.61%; the slightly negative value indicates no between-person variance was accounted for). The variance explained within participants is medium (14.86%).

random slope

As before, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{12} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Random-slope model (nlme fit; the "optim" optimizer is needed to reach
# convergence).
mdlWorkerOut$Qlt$lmeSlopesCore <-
  lme(quality_overall_1 ~ keymotive_fulfillemt_1_cwc,
      random = ~ 1 + keymotive_fulfillemt_1_cwc | PID,
      control = lmeControl(opt = "optim"),
      data = workerOutWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values [+ saved for plotting].
mdlWorkerOut$Qlt$lmerSlopesCore <-
  lmer(quality_overall_1 ~ keymotive_fulfillemt_1_cwc +
         (1 + keymotive_fulfillemt_1_cwc | PID),
       data = workerOutWithinBetween)
summ(mdlWorkerOut$Qlt$lmerSlopesCore,
     confint = TRUE, digits = 3, center = FALSE)
Observations 387
Dependent variable quality_overall_1
Type Mixed effects linear regression
AIC 3296.358
BIC 3320.108
Pseudo-R² (fixed effects) 0.124
Pseudo-R² (total) 0.330
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 24.281 20.195 28.366 11.649 20.059 0.000
keymotive_fulfillemt_1_cwc 0.433 0.296 0.569 6.202 6.853 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 8.461
PID keymotive_fulfillemt_1_cwc 0.167
Residual 15.980
Grouping Variables
Group # groups ICC
PID 21 0.219
# 95% CI (Wald approximation) for the lme4 fit
mdlWorkerOut$Qlt$lmerSlopesCoreCI <- 
  confint(mdlWorkerOut$Qlt$lmerSlopesCore, method = "Wald")

# Compare new model to previous steps via likelihood-ratio tests
anova(mdlWorkerOut$Qlt$lmeNull, 
      mdlWorkerOut$Qlt$lmeInterceptCore, 
      mdlWorkerOut$Qlt$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 10: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Qlt$lmeNull 1 3 3348 3359 -1671
mdlWorkerOut$Qlt$lmeInterceptCore 2 4 3294 3309 -1643 1 vs 2 56.026 < .001
mdlWorkerOut$Qlt$lmeSlopesCore 3 6 3296 3320 -1642 2 vs 3 1.151 0.563
# Save variances
# nlme::VarCorr: the model is an nlme::lme fit, so call nlme's method
# explicitly rather than going through lme4's generic.
mdlWorkerOut$Qlt$varSlopesCore <- 
  nlme::VarCorr(mdlWorkerOut$Qlt$lmeSlopesCore)

We find that adding the random slopes does not add significantly beyond the random intercept model. This might indicate that the effect is very consistent across participants. We also see that when taking the possibility of varying slopes into account, the core need fulfillment remains a significant predictor of perceived interaction quality.

Interaction Needs, Quality, and Attitudes

In our final main step, we will jointly consider the effect of core need fulfillment and perceived interaction quality on outgroup attitudes. We expect that if the relationship between core need fulfillment and outgroup attitudes is indeed explained by a higher perceived interaction quality, the interaction quality perception should assume the explained variance of the core contact need fulfillment.

random intercept with level one predictors

We thus add both the core need fulfillment and perceived interaction quality to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{13} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Random-intercept model with both level-1 predictors, core need fulfillment
# and perceived interaction quality (nlme fit, used for LR comparisons).
mdlWorkerOut$Att$lmeInterceptCoreQlt <-
  lme(thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc + quality_overall_1_cwc,
      random = ~ 1 | PID,
      data = workerOutWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values.
mdlWorkerOut$Att$lmerInterceptCoreQlt <-
  lmer(thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc +
         quality_overall_1_cwc + (1 | PID),
       data = workerOutWithinBetween)
summ(mdlWorkerOut$Att$lmerInterceptCoreQlt,
     confint = TRUE, digits = 3, center = FALSE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2760.196
BIC 2779.988
Pseudo-R² (fixed effects) 0.082
Pseudo-R² (total) 0.773
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.529 66.794 78.264 24.788 19.396 0.000
keymotive_fulfillemt_1_cwc 0.045 -0.006 0.097 1.723 363.415 0.086
quality_overall_1_cwc 0.247 0.199 0.295 10.080 363.415 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.215
Residual 7.574
Grouping Variables
Group # groups ICC
PID 21 0.753
# 95% CI (Wald approximation) for the lme4 fit
mdlWorkerOut$Att$lmerInterceptCoreQltCI <- 
  confint(mdlWorkerOut$Att$lmerInterceptCoreQlt, method = "Wald")

# Compare new model to previous step via likelihood-ratio test
anova(
  mdlWorkerOut$Att$lmeNull, 
  mdlWorkerOut$Att$lmeInterceptCoreQlt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 11: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Att$lmeNull 1 3 2863 2875 -1429
mdlWorkerOut$Att$lmeInterceptCoreQlt 2 5 2760 2780 -1375 1 vs 2 107.264 < .001
# Save variances
# nlme::VarCorr: the model is an nlme::lme fit; call nlme's method explicitly
# instead of relying on lme4's generic to dispatch correctly.
mdlWorkerOut$Att$varInterceptCoreQlt <-
  nlme::VarCorr(mdlWorkerOut$Att$lmeInterceptCoreQlt)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 - (Var with Predictor/Var without Predictor)
mdlWorkerOut$Att$varBtwInterceptCoreQlt <- 
  1 - (as.numeric(mdlWorkerOut$Att$varInterceptCoreQlt[1]) / as.numeric(mdlWorkerOut$Att$varNull[1]))
mdlWorkerOut$Att$varBtwPercInterceptCoreQlt <- 
  mdlWorkerOut$Att$varBtwInterceptCoreQlt * 100  # reuse instead of recomputing
# and the estimate of within-group (or residual variance) explained is:
mdlWorkerOut$Att$varWithinInterceptCoreQlt <-
  1 - (as.numeric(mdlWorkerOut$Att$varInterceptCoreQlt[2]) / as.numeric(mdlWorkerOut$Att$varNull[2]))
mdlWorkerOut$Att$varWithinPercInterceptCoreQlt <-
  mdlWorkerOut$Att$varWithinInterceptCoreQlt * 100  # reuse instead of recomputing

Unsurprisingly, the model with the two predictors adds significantly beyond the empty unconditional model. More importantly, looking at the coefficients, we find that the effect of core need fulfillment is indeed strongly reduced and the variance is instead explained by the perceived interaction quality. The variance explained in outgroup attitudes within participants is of medium size (27.29%), whereas essentially no variance between participants is explained (-2.25%).

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{14} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Random-slope model for both predictors (nlme fit; the "optim" optimizer
# is needed to reach convergence).
mdlWorkerOut$Att$lmeSlopesCoreQlt <-
  lme(thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc + quality_overall_1_cwc,
      random = ~ 1 + keymotive_fulfillemt_1_cwc + quality_overall_1_cwc | PID,
      control = lmeControl(opt = "optim"),
      data = workerOutWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values [+ saved for plotting].
mdlWorkerOut$Att$lmerSlopesCoreQlt <-
  lmer(thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc +
         quality_overall_1_cwc +
         (1 + keymotive_fulfillemt_1_cwc + quality_overall_1_cwc | PID),
       data = workerOutWithinBetween)
summ(mdlWorkerOut$Att$lmerSlopesCoreQlt,
     confint = TRUE, digits = 3, center = FALSE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2672.201
BIC 2711.785
Pseudo-R² (fixed effects) 0.067
Pseudo-R² (total) 0.851
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.496 66.748 78.244 24.720 19.724 0.000
keymotive_fulfillemt_1_cwc 0.037 -0.076 0.149 0.639 14.888 0.532
quality_overall_1_cwc 0.226 0.121 0.331 4.217 21.043 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.312
PID keymotive_fulfillemt_1_cwc 0.217
PID quality_overall_1_cwc 0.210
Residual 6.133
Grouping Variables
Group # groups ICC
PID 21 0.825
# 95% CI (Wald approximation) for the lme4 fit
mdlWorkerOut$Att$lmerSlopesCoreQltCI <- 
  confint(mdlWorkerOut$Att$lmerSlopesCoreQlt, method = "Wald")

# Compare new model to previous steps via likelihood-ratio tests
anova(
  mdlWorkerOut$Att$lmeNull,
  mdlWorkerOut$Att$lmeInterceptCoreQlt,
  mdlWorkerOut$Att$lmeSlopesCoreQlt
) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 12: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Att$lmeNull 1 3 2863 2875 -1429
mdlWorkerOut$Att$lmeInterceptCoreQlt 2 5 2760 2780 -1375 1 vs 2 107.264 < .001
mdlWorkerOut$Att$lmeSlopesCoreQlt 3 10 2672 2712 -1326 2 vs 3 97.996 < .001
# Save variances
# nlme::VarCorr: the model is an nlme::lme fit; call nlme's method explicitly.
mdlWorkerOut$Att$varSlopesCoreQlt <- 
  nlme::VarCorr(mdlWorkerOut$Att$lmeSlopesCoreQlt)

# Assumption Checks: residual and random-effect diagnostic plots for the
# lme4 fit, arranged in a single panel.
mdlWorkerOut$Att$diagSlopesCoreQlt <- 
  sjPlot::plot_model(mdlWorkerOut$Att$lmerSlopesCoreQlt, type = "diag")
grid.arrange(
  mdlWorkerOut$Att$diagSlopesCoreQlt[[1]],
  mdlWorkerOut$Att$diagSlopesCoreQlt[[2]]$`PID`,
  mdlWorkerOut$Att$diagSlopesCoreQlt[[3]],
  mdlWorkerOut$Att$diagSlopesCoreQlt[[4]]
)

# Plot prediction model
# BUGFIX: `re.form = NA` is lme4 syntax; nlme's predict.lme() has no such
# argument, so it was silently swallowed by `...` and the predictions
# included the random effects. For population-level (fixed-effects-only)
# predictions nlme uses `level = 0`.
mdlWorkerOut$Att$predSlopesCoreQlt <- 
  workerOutWithinBetween %>%
  select(thermometerDutch_1, session, PID) %>% 
  mutate(measure = predict(mdlWorkerOut$Att$lmeSlopesCoreQlt,
                           workerOutWithinBetween,
                           level = 0))

# Observed (black) vs. model-predicted (blue) trajectories per participant
(
  mdlWorkerOut$Att$predPltSlopesCoreQlt <-
    ggplot(data = mdlWorkerOut$Att$predSlopesCoreQlt, aes(x = session, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the figure for the manuscript
ggsave(
  filename = "Figures/WorkerOut_PredictionPlot_SlopesAttCoreQlt.png",
  mdlWorkerOut$Att$predPltSlopesCoreQlt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that the perceived interaction quality remains a strong predictor (even when letting the slopes vary between participants).

Check for robustness

To build further confidence in our results, we assess a few additional models that might offer alternative explanations of the effects we find.

Interaction Type

To make certain that the effect of core need fulfillment is specific to the interaction, we compare the effect to the fulfillment of the situational core need when no outgroup interaction took place.

random intercept

Here we go back to the full dataset and add generalized situational core need fulfillment (either during an interaction or about the daytime in general) and whether an outgroup interaction happened as well as their interaction term to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{15} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \end{split} \end{equation}\]

# Random-intercept model with the core-need x outgroup-contact interaction
# (nlme fit, used for the likelihood-ratio comparisons below).
mdlWorker$lmeInterceptAttCoreInt <-
  lme(thermometerDutch_1 ~ keyMotiveFulfilled_cwc * OutgroupInteraction,
      random = ~ 1 | PID,
      data = workerWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values.
mdlWorker$lmerInterceptAttCoreInt <-
  lmer(thermometerDutch_1 ~ keyMotiveFulfilled_cwc * OutgroupInteraction +
         (1 | PID),
       data = workerWithinBetween)
summ(mdlWorker$lmerInterceptAttCoreInt,
     confint = TRUE, digits = 3, center = FALSE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8771.519
BIC 8802.183
Pseudo-R² (fixed effects) 0.014
Pseudo-R² (total) 0.707
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.856 63.416 74.297 24.805 25.453 0.000
keyMotiveFulfilled_cwc -0.123 -0.192 -0.054 -3.502 1199.844 0.000
OutgroupInteraction 1.788 0.666 2.910 3.124 1203.278 0.002
keyMotiveFulfilled_cwc:OutgroupInteraction 0.131 0.077 0.185 4.748 1199.946 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.711
Residual 8.256
Grouping Variables
Group # groups ICC
PID 23 0.703
# 95% CI (Wald approximation) for the lme4 fit
mdlWorker$lmerInterceptAttCoreIntCI <- 
  confint(mdlWorker$lmerInterceptAttCoreInt, method = "Wald")

# Compare new model to previous step via likelihood-ratio test
anova(mdlWorker$lmeAttNull, 
      mdlWorker$lmeInterceptAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # rownames_to_column() replaces the long-deprecated add_rownames()
  tibble::rownames_to_column(var = "Description") %>%
  # strip the list path, keep only the model name after the last "$"
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    align = c("l", rep("c", ncol(.) - 1)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 13: Worker: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 8806 8821 -4400
lmeInterceptAttCoreInt 2 6 8772 8802 -4380 1 vs 2 40.362 < .001
# Save variances
# nlme::VarCorr: the model is an nlme::lme fit; call nlme's method explicitly
# instead of relying on lme4's generic to dispatch correctly.
mdlWorker$varInterceptAttCoreInt <- 
  nlme::VarCorr(mdlWorker$lmeInterceptAttCoreInt)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 - (Var with Predictor/Var without Predictor)
mdlWorker$varBtwInterceptAttCoreInt <-
  1 - (as.numeric(mdlWorker$varInterceptAttCoreInt[1]) / as.numeric(mdlWorker$varAttNull[1]))
mdlWorker$varBtwPercInterceptAttCoreInt <-
  mdlWorker$varBtwInterceptAttCoreInt * 100  # reuse instead of recomputing
# and the estimate of within-group (or residual variance) explained is:
mdlWorker$varWithinInterceptAttCoreInt <-
  1 - (as.numeric(mdlWorker$varInterceptAttCoreInt[2]) / as.numeric(mdlWorker$varAttNull[2]))
mdlWorker$varWithinPercInterceptAttCoreInt <-
  mdlWorker$varWithinInterceptAttCoreInt * 100  # reuse instead of recomputing

We find that the model explains significantly more variance than the empty null model. However, more interestingly, looking at the coefficients, we find that, as seen earlier, having an outgroup interaction has a strong effect on outgroup attitudes. Importantly, we find that there is no main effect of key need fulfillment but a significant interaction effect of core need fulfillment and outgroup contact. This indicates that it is not key need fulfillment in general — but only key need fulfillment during an outgroup contact that predicts more positive outgroup attitudes.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{16} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \end{split} \end{equation}\]

# Random-slope model (nlme fit; the "optim" optimizer is needed to reach
# convergence). Note: only the main-effect slopes vary by participant;
# the interaction slope is fixed, matching the original specification.
mdlWorker$lmeSlopesAttCoreInt <-
  lme(thermometerDutch_1 ~ keyMotiveFulfilled_cwc * OutgroupInteraction,
      random = ~ 1 + keyMotiveFulfilled_cwc + OutgroupInteraction | PID,
      control = lmeControl(opt = "optim"),
      data = workerWithinBetween)

# lme4 refit, summarized with Satterthwaite p-values [+ saved for plotting].
mdlWorker$lmerSlopesAttCoreInt <-
  lmer(thermometerDutch_1 ~ keyMotiveFulfilled_cwc * OutgroupInteraction +
         (1 + keyMotiveFulfilled_cwc + OutgroupInteraction | PID),
       data = workerWithinBetween)
summ(mdlWorker$lmerSlopesAttCoreInt,
     confint = TRUE, digits = 3, center = FALSE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8761.159
BIC 8817.377
Pseudo-R² (fixed effects) 0.017
Pseudo-R² (total) 0.727
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.884 63.241 74.527 23.925 22.110 0.000
keyMotiveFulfilled_cwc -0.098 -0.185 -0.012 -2.224 62.290 0.030
OutgroupInteraction 1.808 0.248 3.369 2.271 20.131 0.034
keyMotiveFulfilled_cwc:OutgroupInteraction 0.127 0.069 0.184 4.310 703.205 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.191
PID keyMotiveFulfilled_cwc 0.100
PID OutgroupInteraction 2.460
Residual 8.044
Grouping Variables
Group # groups ICC
PID 23 0.729
# 95% CI (Wald approximation) for the lme4 fit
mdlWorker$lmerSlopesAttCoreIntCI <- 
  confint(mdlWorker$lmerSlopesAttCoreInt, method = "Wald")

# Compare new model to previous steps via likelihood-ratio tests
anova(mdlWorker$lmeAttNull, 
      mdlWorker$lmeInterceptAttCoreInt,
      mdlWorker$lmeSlopesAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report very small p-values as "< .001" (APA style)
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # rownames_to_column() replaces the long-deprecated add_rownames()
  tibble::rownames_to_column(var = "Description") %>%
  # strip the list path, keep only the model name after the last "$"
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not the reassignable shorthand T
    # left-align the Description column, consistent with Table 13's pipeline
    align = c("l", rep("c", ncol(.) - 1)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 14: Worker: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 8806 8821 -4400
lmeInterceptAttCoreInt 2 6 8772 8802 -4380 1 vs 2 40.362 < .001
lmeSlopesAttCoreInt 3 11 8761 8817 -4370 2 vs 3 20.36 0.001
# Save variances
# nlme::VarCorr: the model is an nlme::lme fit; call nlme's method explicitly.
mdlWorker$varSlopesAttCoreInt <- 
  nlme::VarCorr(mdlWorker$lmeSlopesAttCoreInt)

# Assumption Checks: residual and random-effect diagnostic plots for the
# lme4 fit, arranged in a single panel.
mdlWorker$diagSlopesAttCoreInt <-
  sjPlot::plot_model(mdlWorker$lmerSlopesAttCoreInt, type = "diag")
grid.arrange(
  mdlWorker$diagSlopesAttCoreInt[[1]],
  mdlWorker$diagSlopesAttCoreInt[[2]]$`PID`,
  mdlWorker$diagSlopesAttCoreInt[[3]],
  mdlWorker$diagSlopesAttCoreInt[[4]]
)

# Plot prediction model
# BUGFIX: `re.form = NA` is lme4 syntax; nlme's predict.lme() has no such
# argument, so it was silently swallowed by `...` and the predictions
# included the random effects. For population-level (fixed-effects-only)
# predictions nlme uses `level = 0`.
mdlWorker$predSlopesAttCoreInt <- 
  workerWithinBetween %>%
  select(thermometerDutch_1, session, PID) %>% 
  mutate(measure = predict(mdlWorker$lmeSlopesAttCoreInt,
                           workerWithinBetween,
                           level = 0))

# Observed (black) vs. model-predicted (blue) trajectories per participant
(
  mdlWorker$predPltSlopesAttCoreInt <-
    ggplot(data = mdlWorker$predSlopesAttCoreInt, aes(x = session, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the figure for the manuscript
ggsave(
  filename = "Figures/Worker_PredictionPlot_SlopesAttCoreInt.png",
  mdlWorker$predPltSlopesAttCoreInt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also see that when taking the possibility of varying slopes into account, the coefficient interpretations remain consistent (i.e., outgroup contact and its interaction with core need fulfillment remain significant predictors of positive outgroup attitudes).

Plot Interaction

Before we move on, we shortly illustrate the interaction effect of how the effect of core need fulfillment differed by whether an outgroup contact took place or not. To this end we illustrate (1) the raw data points (without taking the nested nature into account), as well as (2) a plot of the model-predicted values and their prediction interval (taking the nested structure into account; similar to an interaction plot).

# visualize interaction
## Without ML structure: naive per-condition regression lines over the raw
## data, ignoring the nesting of measurements within participants.
ggplot(data = workerInteractionType,
       aes(x = keyMotiveFulfilled,
           y = thermometerDutch_1,
           fill = OutgroupInteraction)) +
  #geom_point()+
  geom_smooth(method = 'lm',
              aes(linetype = OutgroupInteraction),
              color = "black") +
  #facet_wrap(~PID, ncol = 6)+
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  #scale_colour_manual(values=c("grey20", "black"), name="Intergroup Contact")+
  scale_y_continuous(
    limits = c(50, 100),
    breaks = seq(50, 100, by = 10),
    position = "left"
  ) +
  scale_x_continuous(limits = c(-50, 50), breaks = seq(-50, 50, by = 10)) +
  labs(
    # BUGFIX: user-facing title had a typo ("stucture")
    title = "Without ML structure",
    x = "Fulfillment Core Need",
    y = "Outgroup Attitudes",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact"
  ) +
  theme_Publication() +
  theme(legend.position = "bottom",
        legend.key.size = unit(1, "cm"))

## With ML structure
# create parameters for prediction: a grid of core-need values (bounds
# rounded to multiples of 5, step 5), duplicated once per contact condition;
# PID = 0 is a placeholder since we predict at the population level.
datNew <- data.frame(  # use <- for assignment, consistent with the file
  keyMotiveFulfilled_cwc = rep(seq(
    round_any(min(
      workerWithinBetween$keyMotiveFulfilled_cwc
    ), 5, f = floor), round_any(max(
      workerWithinBetween$keyMotiveFulfilled_cwc
    ), 5, f = ceiling), 5
  ), 2),
  PID = 0
) %>%
  mutate(OutgroupInteraction = rep(c(1, 2), each = nrow(.)/2)) %>%
  select(PID, OutgroupInteraction, keyMotiveFulfilled_cwc)


# Predict values, clean up and calculate SE
PI <-
  merTools::predictInterval(
    merMod = mdlWorker$lmerSlopesAttCoreInt,
    newdata = datNew,
    level = 0.95,
    stat = "mean",
    type = "linear.prediction",
    include.resid.var = FALSE,     # FALSE, not the reassignable shorthand F
    fix.intercept.variance = FALSE
  )
mdlWorker$predInterceptAttCoreIntX <- 
  cbind(datNew, PI)
# approximate SE from the upper 95% bound via the normal approximation
mdlWorker$predInterceptAttCoreIntX$se <-
  (mdlWorker$predInterceptAttCoreIntX$upr - mdlWorker$predInterceptAttCoreIntX$fit) / 1.96
rm(datNew, PI)
# readable factor labels for the contact conditions (1 = "No", 2 = "Yes")
mdlWorker$predInterceptAttCoreIntX$OutgroupInteractionLab <-
  factor(
    x = mdlWorker$predInterceptAttCoreIntX$OutgroupInteraction,
    levels = sort(
      unique(mdlWorker$predInterceptAttCoreIntX$OutgroupInteraction)
    ),
    labels = c("No", "Yes")
  )


# Plot predicted values with an SE ribbon (model-based interaction plot)
ggplot(
  mdlWorker$predInterceptAttCoreIntX,
  aes(x = keyMotiveFulfilled_cwc,
      y = fit,
      fill = OutgroupInteractionLab)
)+
  #geom_point() +
  # `linewidth` replaces the line-`size` aesthetic deprecated in ggplot2 3.4
  geom_line(aes(linetype = OutgroupInteractionLab), linewidth = 1) +
  #facet_wrap(~PID, ncol = 6)+
  geom_ribbon(data = mdlWorker$predInterceptAttCoreIntX,
              aes(ymin = fit - se, ymax = fit + se),
              alpha = 0.3) +
  scale_x_continuous(breaks = seq(
    round_any(min(
      workerWithinBetween$keyMotiveFulfilled_cwc
    ), 5, f = floor), round_any(max(
      workerWithinBetween$keyMotiveFulfilled_cwc
    ), 5, f = ceiling), 10
  )) +
  scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  labs(
    x = "Fulfillment Core Need",
    y = "Outgroup Attitude (NL)",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact",
    title = "Based on Model Predictions"
  ) +
  theme_Publication()

# #### Bayesian estimation !! ONLY RUN ON FINAL RENDER !! Takes forever ####
# options(mc.cores = parallel::detectCores())  # Run many chains simultaneously
# brmfit <- brm(
#   thermometerDutch_1 ~ keyMotiveFulfilled_cwc * OutgroupInteraction +
#     (1 + keyMotiveFulfilled_cwc + OutgroupInteraction | PID),
#   data = workerWithinBetween,
#   family = gaussian,
#   iter = 1000,
#   chains = 4
# )
# 
# create parameters for prediction:
# datNew = data.frame(
#   keyMotiveFulfilled_cwc = rep(seq(
#     round_any(min(
#       workerWithinBetween$keyMotiveFulfilled_cwc
#     ), 2, f = floor), round_any(max(
#       workerWithinBetween$keyMotiveFulfilled_cwc
#     ), 2, f = ceiling), 2
#   ), 2)
# ) %>%
#   mutate(OutgroupInteraction = rep(c(1, 2), each = nrow(.)/2))
# 
# # Save predicted values and adjust names and labels
# fitavg <-
#   cbind(datNew,
#         fitted(brmfit, newdata = datNew, re_formula = NA)[, -2])
# names(fitavg)[names(fitavg) == "Estimate"] = "pred"
# fitavg$se <- (fitavg$Q97.5 - fitavg$pred) / 1.96
# fitavg$OutgroupInteractionLab <-
#   factor(
#     x = fitavg$OutgroupInteraction,
#     levels = sort(
#       unique(fitavg$OutgroupInteraction)
#     ),
#     labels = c("No", "Yes")
#   )
# 
# # Plot Bayesian SE prediction interval
# ggplot(fitavg,
#        aes(x = keyMotiveFulfilled_cwc,
#            y = pred,
#            fill = OutgroupInteractionLab)) +
#   scale_x_continuous(breaks = seq(
#     round_any(min(
#       workerWithinBetween$keyMotiveFulfilled_cwc
#     ), 5, f = floor), round_any(max(
#       workerWithinBetween$keyMotiveFulfilled_cwc
#     ), 5, f = ceiling), 10
#   )) +
#   scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
#   geom_line(aes(linetype = OutgroupInteractionLab), size = 1) +
#   geom_ribbon(aes(ymin = pred - se, ymax = pred + se), alpha = 0.2) +
#   scale_linetype_manual(values = c("dashed", "solid")) +
#   scale_fill_manual(values = c("darkgrey", "black")) +
#   labs(
#     x = "Fulfillment Core Need",
#     y = "Outgroup Attitude (NL)",
#     fill = "Intergroup Contact",
#     linetype = "Intergroup Contact",
#     title = "Based on Bayesian Prediction Interval"
#   ) +
#   theme_Publication()
# 
# # plot all overlayed posteriors:
# pst <- posterior_samples(brmfit, "b")
# ggplot(workerWithinBetween,
#        aes(x = keyMotiveFulfilled_cwc, y = thermometerDutch_1)) +
#   geom_point(shape = 4, alpha = .1) +
#   geom_tile() +
#   geom_abline(
#     data = pst,
#     aes(intercept = b_Intercept, slope = b_keyMotiveFulfilled_cwc),
#     alpha = .025,
#     size = .4
#   ) +
#   labs(title = "slope Posteriors",
#        x = "Fulfillment Core Need",
#        y = "Outgroup Attitudes (NL)") +
#   theme_Publication()
# rm(datNew, brmfit, fitavg, pst)

Other psychological needs

In a final step we check whether during the interaction the core situational need is a meaningful predictor even when taking other fundamental psychological needs into account. We focus on the three commonly considered self determination needs: competence, autonomy, and relatedness.

random intercept with level one predictors

We add the core need fulfillment with the three self determination needs to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{17} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \\ &\ \beta_{4i} = \gamma_{40} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model of outgroup attitudes with the core need and the
# three SDT needs (all person-mean centered) as level-1 predictors.
mdlWorkerOut$Att$lmeInterceptCoreSdt <-
  lme(
    fixed = thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc +
      competence_1_cwc + autonomy_1_cwc + relatedness_1_cwc,
    data = workerOutWithinBetween,
    random = ~ 1 | PID,
    na.action = na.exclude
  )

# Get summary with p-values (Satterthwaite's method)
# Refit the same model with lme4 so that summ() can report Satterthwaite
# degrees of freedom; the fitted object is stored for later use.
mdlWorkerOut$Att$lmerInterceptCoreSdt <- lmer(
  thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc + competence_1_cwc +
    autonomy_1_cwc + relatedness_1_cwc + (1 | PID),
  data = workerOutWithinBetween
)
summ(
  mdlWorkerOut$Att$lmerInterceptCoreSdt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 386
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2804.258
BIC 2831.949
Pseudo-R² (fixed effects) 0.060
Pseudo-R² (total) 0.748
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.534 66.809 78.259 24.831 19.338 0.000
keymotive_fulfillemt_1_cwc 0.092 0.038 0.146 3.328 360.361 0.001
competence_1_cwc 0.024 -0.029 0.077 0.877 360.362 0.381
autonomy_1_cwc 0.077 0.012 0.142 2.330 360.362 0.020
relatedness_1_cwc 0.113 0.077 0.149 6.127 360.362 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.171
Residual 7.979
Grouping Variables
Group # groups ICC
PID 21 0.732
# 95% CI
# Wald confidence intervals for the fixed and random effects of the
# core + SDT random-intercept model.
mdlWorkerOut$Att$lmerInterceptCoreSdtCI <-
  confint(mdlWorkerOut$Att$lmerInterceptCoreSdt, method = "Wald")

# To be compared against a model with only the self determination theory needs
# Reference model: SDT needs only, core need excluded.
mdlWorkerOut$Att$lmeInterceptSdt <-
  lme(
    fixed = thermometerDutch_1 ~ competence_1_cwc + autonomy_1_cwc +
      relatedness_1_cwc,
    data = workerOutWithinBetween,
    random = ~ 1 | PID,
    na.action = na.exclude
  )

# lme4 refit of the SDT-only model for Satterthwaite p-values via summ().
mdlWorkerOut$Att$lmerInterceptSdt <- lmer(
  thermometerDutch_1 ~ competence_1_cwc + autonomy_1_cwc +
    relatedness_1_cwc + (1 | PID),
  data = workerOutWithinBetween
)
summ(
  mdlWorkerOut$Att$lmerInterceptSdt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 386
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2807.861
BIC 2831.596
Pseudo-R² (fixed effects) 0.053
Pseudo-R² (total) 0.740
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.535 66.813 78.258 24.844 19.321 0.000
competence_1_cwc 0.046 -0.007 0.098 1.714 361.346 0.087
autonomy_1_cwc 0.090 0.025 0.155 2.718 361.346 0.007
relatedness_1_cwc 0.118 0.081 0.154 6.303 361.347 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.159
Residual 8.089
Grouping Variables
Group # groups ICC
PID 21 0.726
# Compare new model to previous steps
# Likelihood-ratio test: does adding the core need improve on the
# SDT-needs-only random-intercept model? Rendered as an HTML table.
anova(
  mdlWorkerOut$Att$lmeInterceptSdt,
  mdlWorkerOut$Att$lmeInterceptCoreSdt
  ) %>%
  as.data.frame() %>%
  # drop the stored model calls so only fit statistics are tabled
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report exact p unless it underflows the 3-digit display
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  # blank out NA cells (the first model has no predecessor to test against)
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 15: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Att$lmeInterceptSdt 1 6 2808 2832 -1398
mdlWorkerOut$Att$lmeInterceptCoreSdt 2 7 2804 2832 -1395 1 vs 2 5.603 0.018
# NOTE(review): `lmeInterceptCoreRed` is not created anywhere in this
# section; this rm() presumably cleans up an object from an earlier
# chunk (it only warns if the object is absent) -- confirm.
rm(lmeInterceptCoreRed)

# Save variances
# Store the variance components of the core + SDT random-intercept model.
mdlWorkerOut$Att$varInterceptCoreSdt <-
  lme4::VarCorr(mdlWorkerOut$Att$lmeInterceptCoreSdt)

We compare the models of the core need and the SDT need fulfillments to a model that only includes the SDT needs. We find that the core need adds significantly above the SDT needs. We find that next to relatedness, the core need explains the most variance and compared to the model with only the SDT needs, the core need fulfillment flexibly takes on some of the explained variance of all of the three fundamental needs (i.e., reduction in SDT beta weights).

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{18} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \\ &\ \beta_{4i} = \gamma_{40} + u_{4i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random-slopes model: intercept and all four need effects are allowed to
# vary across participants; "optim" is used because the default optimizer
# does not converge here.
mdlWorkerOut$Att$lmeSlopesCoreSdt <-
  lme(
    fixed = thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc +
      competence_1_cwc + autonomy_1_cwc + relatedness_1_cwc,
    random = ~ 1 + keymotive_fulfillemt_1_cwc + competence_1_cwc +
      autonomy_1_cwc + relatedness_1_cwc | PID,
    data = workerOutWithinBetween,
    na.action = na.exclude,
    control = lmeControl(opt = "optim")
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
# lme4 refit of the random-slopes model; kept for diagnostics below.
mdlWorkerOut$Att$lmerSlopesCoreSdt <- lmer(
  thermometerDutch_1 ~ keymotive_fulfillemt_1_cwc + competence_1_cwc +
    autonomy_1_cwc + relatedness_1_cwc +
    (1 + keymotive_fulfillemt_1_cwc + competence_1_cwc +
       autonomy_1_cwc + relatedness_1_cwc | PID),
  data = workerOutWithinBetween
)
summ(
  mdlWorkerOut$Att$lmerSlopesCoreSdt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 386
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2756.844
BIC 2839.917
Pseudo-R² (fixed effects) 0.047
Pseudo-R² (total) 0.859
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.525 66.785 78.265 24.764 19.735 0.000
keymotive_fulfillemt_1_cwc 0.094 0.014 0.175 2.295 11.486 0.041
competence_1_cwc 0.049 -0.157 0.255 0.467 8.883 0.652
autonomy_1_cwc 0.056 -0.123 0.234 0.612 9.361 0.555
relatedness_1_cwc 0.096 0.050 0.141 4.133 18.621 0.001
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.281
PID keymotive_fulfillemt_1_cwc 0.128
PID competence_1_cwc 0.450
PID autonomy_1_cwc 0.374
PID relatedness_1_cwc 0.068
Residual 6.412
Grouping Variables
Group # groups ICC
PID 21 0.811
# 95% CI
# Wald confidence intervals for the random-slopes model.
mdlWorkerOut$Att$lmerSlopesCoreSdtCI <-
  confint(mdlWorkerOut$Att$lmerSlopesCoreSdt, method = "Wald")

# Compare new model to previous step
# Three-way likelihood-ratio sequence: SDT-only -> + core need ->
# + random slopes. Each model is tested against its predecessor.
anova(mdlWorkerOut$Att$lmeInterceptSdt,
      mdlWorkerOut$Att$lmeInterceptCoreSdt, 
      mdlWorkerOut$Att$lmeSlopesCoreSdt) %>%
  as.data.frame() %>%
  # drop the stored model calls so only fit statistics are tabled
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report exact p unless it underflows the 3-digit display
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  # blank out NA cells (the first model has no predecessor to test against)
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Worker: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 16: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlWorkerOut$Att$lmeInterceptSdt 1 6 2808 2832 -1398
mdlWorkerOut$Att$lmeInterceptCoreSdt 2 7 2804 2832 -1395 1 vs 2 5.603 0.018
mdlWorkerOut$Att$lmeSlopesCoreSdt 3 21 2757 2840 -1358 2 vs 3 74.946 < .001
# Save variances
# Variance components (SDs of intercept, each need slope, and residual)
# of the random-slopes model.
mdlWorkerOut$Att$varSlopesCoreSdt <- 
  lme4::VarCorr(mdlWorkerOut$Att$lmeSlopesCoreSdt)

# Assumption Checks:
# plot_model(type = "diag") returns a list of diagnostic plots; element
# [[2]] is itself a list with one QQ plot per grouping factor, hence the
# `$PID` extraction. NOTE(review): the meaning of the four panels
# (residual QQ, random-effect QQ, density, fitted-vs-residuals) is
# assumed from the sjPlot defaults -- confirm against ?plot_model.
mdlWorkerOut$Att$diagSlopesCoreSdt <- 
  sjPlot::plot_model(mdlWorkerOut$Att$lmerSlopesCoreSdt, type = "diag")
grid.arrange(
  mdlWorkerOut$Att$diagSlopesCoreSdt[[1]],
  mdlWorkerOut$Att$diagSlopesCoreSdt[[2]]$`PID`,
  mdlWorkerOut$Att$diagSlopesCoreSdt[[3]],
  mdlWorkerOut$Att$diagSlopesCoreSdt[[4]]
)

# Plot prediction model
# Fixed-effects-only (population-level) predictions from the nlme fit.
# BUG FIX: the original passed `re.form = NA`, which is lme4 syntax;
# predict.lme() has no such argument and silently absorbs it via `...`,
# so the predictions included the random effects. For nlme objects the
# fixed-effects-only prediction is requested with `level = 0`.
mdlWorkerOut$Att$predSlopesCoreSdt <- 
  workerOutWithinBetween %>%
  filter(!is.na(autonomy_1)) %>%
  select(thermometerDutch_1, session, PID, autonomy_1) %>% 
  mutate(measure = predict(mdlWorkerOut$Att$lmeSlopesCoreSdt,
                           workerOutWithinBetween %>% filter(!is.na(autonomy_1)),
                           level = 0
                           )
         )

(
  # Observed (black) vs. population-level predicted (blue) attitudes
  # over time, one panel per participant. Outer parentheses print the
  # plot while also storing it.
  mdlWorkerOut$Att$predPltSlopesCoreSdt <-
    ggplot(mdlWorkerOut$Att$predSlopesCoreSdt,
           aes(x = session, y = measure)) +
    geom_line(color = "blue", alpha = 1) +
    geom_line(aes(y = thermometerDutch_1), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    labs(x = "Time", y = "Outgroup Attitudes") +
    theme_Publication()
)

# Export the prediction plot. NOTE(review): the file name ends in
# "...CoreStd" (not "Sdt"); kept byte-identical so existing references
# to the figure file remain valid.
ggsave(
  plot = mdlWorkerOut$Att$predPltSlopesCoreSdt,
  filename = "Figures/WorkerOut_PredictionPlot_SlopesAttCoreStd.png",
  device = "png",
  units = "cm",
  width = 18,
  height = 12,
  dpi = 800
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also see that when taking the possibility of varying slopes into account, the coefficient interpretations remain consistent (i.e., core need and relatedness remain the strongest and only significant predictors). Note, however, a slight reduction in the significance of the core need fulfillment (i.e., a larger p value).

Student Sample

The second sample we assess is a larger study with international psychology students at a Western European university. The hypotheses mirror those of the first study and are re-iterated here:

  1. Based on the most general understanding of the contact hypothesis, an increase in frequency and quality of contact should jointly account for changes in more favorable outgroup attitudes.
    1. Participants with more intergroup interactions should have a more favorable outgroup attitudes.
    2. Outgroup attitudes should be higher after an intergroup interaction compared to a non-outgroup interaction.
    3. Participants with more intergroup interactions should have a more favorable outgroup attitudes depending on the average interaction quality.
  2. Based on our proposal, intergroup interactions with higher situational core need fulfillment should predict more favorable outgroup attitudes due to more positive interaction quality perceptions.
    1. Outgroup attitudes should be more favorable after intergroup interactions with high key need fulfillment.
    2. Interaction Quality should be perceived as more positive after intergroup interactions with higher key need fulfillment.
    3. The variance explained in outgroup attitudes by key need fulfillment should to a large extend be assumed by interaction quality.
    4. The effect of key need fulfillment on outgroup attitudes should be specific to intergroup interactions and not be due to need fulfillment in general. Thus, the effect of key need fulfillment on outgroup attitudes should be stronger for intergroup interactions than for ingroup interactions.
    5. The effect of key need fulfillment on outgroup attitudes should persist even when taking other fundamental psychological needs into account. Thus, the effect of key need fulfillment on outgroup attitudes should remain strong even after controlling for autonomy, competence, and relatedness fulfillment during the interaction (cf., self-determination theory).

We will test our main hypotheses for this study in a sequential manner.

Data Description

Participants

# summarize participant characteristics
# One row per participant: number of completed beeps (overall and per
# daypart) plus demographics. Gender codes: 1 = women, 2 = man, 3 = other.
studentSampleInfo <- 
  dtStudents$full %>%
  mutate(gender = as.factor(ifelse(.$Gender == 1, "women", ifelse(.$Gender == 2, "man", ifelse(.$Gender == 3, "other", NA))))) %>%
  group_by(PID) %>%
  summarise(
    dailiesN = n(), 
    morningN = sum(periodMA=="morning"),
    afternoonN = sum(periodMA=="afternoon"),
    # NOTE(review): returning the full age/gender/nationality vectors from
    # summarise() produces one row per beep (deprecated in dplyr >= 1.0)
    # and relies on distinct() below to collapse back to one row per PID.
    # A participant whose values vary across beeps would stay duplicated
    # -- confirm the data are constant within PID.
    age = age,
    gender = gender,
    nationality = nationality
  ) %>%
  distinct

# look at frequencies of characteristics 
# Descriptive summary table of the student sample; nationality is cast
# to character so tbl_summary treats it as a categorical variable.
studentSampleInfo %>%
  ungroup() %>%
  mutate(nationality = as.character(nationality)) %>%
  select(
    "Number of Measurements" = dailiesN,
    Age = age,
    Gender = gender,
    Nationality = nationality
  ) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 1131
Number of Measurements 48 (34, 56)
Age 20 (19, 20)
Gender
women 84 (74%)
man 29 (26%)
Nationality
Germany 71 (63%)
United Kingdom 6 (5.3%)
Ireland 5 (4.4%)
Romania 5 (4.4%)
Bulgaria 3 (2.7%)
Finland 2 (1.8%)
Greece 2 (1.8%)
Italy 2 (1.8%)
United States 2 (1.8%)
Austria 1 (0.9%)
China 1 (0.9%)
Croatia 1 (0.9%)
Cyprus 1 (0.9%)
Estonia 1 (0.9%)
France 1 (0.9%)
Georgia 1 (0.9%)
Hungary 1 (0.9%)
India 1 (0.9%)
Israel 1 (0.9%)
Slovakia 1 (0.9%)
Spain 1 (0.9%)
Sweden 1 (0.9%)
Turkey 1 (0.9%)
Vietnam 1 (0.9%)

1 Median (IQR); n (%)

Interactions

# duration of survey should include median and MAD
# Survey-completion duration per measurement, in seconds.
# BUG FIX: `ended.daily - created.daily` yields a difftime whose units
# are chosen automatically (secs/mins/hours depending on magnitude);
# since the table below is labelled "in Seconds", the units are pinned
# explicitly with difftime(units = "secs").
studentInteractions <- dtStudents$full %>%
  dplyr::select(created.daily, ended.daily) %>%
  mutate_all(ymd_hms) %>%
  mutate(duration = as.numeric(
    difftime(ended.daily, created.daily, units = "secs")
  )) %>%
  select(duration)

# Descriptive table of measurement duration: psych::describe() supplies
# mean/trimmed mean/median/sd/mad/min/max/skew/kurtosis; winsorized mean
# and SD are added manually.
studentInteractions %>%
  as.data.frame %>%
  psych::describe(., trim = .2) %>%
  as.data.frame %>%
  mutate(vars = c("Duration [in seconds]"), # rownames(.),
         # number of beeps without a recorded duration
         na = nrow(dtStudents$full)-n,
         win.mean = sapply(studentInteractions,psych::winsor.mean,simplify=T),
         win.sd = sapply(studentInteractions,psych::winsor.sd,simplify=T)) %>%
  dplyr::select(characteristic = vars, n, na, 
                mean, `mean win` = win.mean, `mean trim` = trimmed, median,
                sd, `sd win` = win.sd, MAD = mad, min, max,
                skew, kurtosis) %>%
  kbl(., 
      #label = "",
      caption = "Study 2: Duration of Measurement in Seconds",
      format = "html", 
      #linesep = "",
      #booktabs = T,
      row.names = F,
      digits = 2,
      align = c('l', rep('c', ncol(.)-1)))  %>%
  # header spans: 3 id columns, 4 centrality, 5 dispersion, 2 distribution
  add_header_above(., c(" " = 3,"Centrality" = 4, "Dispersion" = 5, "Distribution" = 2)) %>%
  footnote(general = "'na' indicates the number of measurements for which measurement duration is unknown.") %>%
  kable_classic(full_width = F, 
                lightable_options = "hover", 
                html_font = "Cambria")
Table 17: Study 2: Duration of Measurement in Seconds
Centrality
Dispersion
Distribution
characteristic n na mean mean win mean trim median sd sd win MAD min max skew kurtosis
Duration [in seconds] 4965 0 536.5 245.1 235.8 226 1694 84.41 114.2 -63 22595 8.54 84.53
Note:
‘na’ indicates the number of measurements for which measurement duration is unknown.
# Frequencies of outgroup / non-outgroup interactions across all beeps.
studentInteractionType %>%
  select(OutgroupInteraction, NonOutgroupInteraction) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 4,9651
OutgroupInteraction 935 (19%)
NonOutgroupInteraction 2,941 (59%)

1 n (%)

Variable distributions

# calculate correlations and descriptives
# MlCorMat (project helper): multilevel correlation matrix -- per the
# table footnote, between-person correlations above and within-person
# correlations below the diagonal, plus grand means, between/within SDs
# and ICC(1)/ICC(2) rows.
studentMlCor <-
  MlCorMat(
    data = studentInteractionType,
    id = "PID",
    selection = c("KeyNeedFullfillment", "Competence", "Autonomy", "Relatedness", "quality_overall", "AttitudesDutch"),
    labels = c("Core Need", "Competence", "Autonomy", "Relatedness", "Quality", "Attitudes NL")
  ) 

# Render the multilevel correlation matrix. The first ncol() rows are
# the correlation block; the remaining rows are descriptives.
studentMlCor %>%
  kable(
    .,
    caption = "Student: Multilevel Core Variable Descriptives",
    format = "html",
    #booktabs = TRUE,
    linesep = linesep(c(ncol(.))),
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  # correlation block spans as many rows as there are variables (columns)
  pack_rows("Correlations", 1, ncol(studentMlCor)) %>%
  pack_rows("Descriptives", ncol(studentMlCor)+1, nrow(studentMlCor)) %>%
  footnote(
    general = c(
      "Upper triangle: Between-person correlations;",
      "Lower triangle: Within-person correlations;",
      "*** p < .001, ** p < .01,  * p < .05"
    )
  ) %>%
  kable_classic(full_width = F, 
                lightable_options = "hover", 
                html_font = "Cambria")
Table 18: Student: Multilevel Core Variable Descriptives
Core Need Competence Autonomy Relatedness Quality Attitudes NL
Correlations
Core Need 0.60*** 0.66*** 0.43*** 0.78*** 0.11
Competence 0.24*** 0.71*** 0.65*** 0.74*** 0.09
Autonomy 0.15*** 0.34*** 0.56*** 0.67*** -0.06
Relatedness 0.45*** 0.29*** 0.42*** 0.54*** -0.11
Quality 0.17*** 0.39*** 0.08*** 0.05** 0.10
Attitudes NL 0.35*** 0.43*** 0.11*** 0.10*** 0.12***
Descriptives
Grand Mean 84.87 72.55 82.59 61.21 83.77 67.26
Between SD 9.17 14.47 11.21 13.36 9.12 18.64
Within SD 20.33 21.17 16.06 28.74 16.80 9.40
ICC(1) 0.15 0.30 0.32 0.17 0.20 0.80
ICC(2) 0.89 0.95 0.95 0.90 0.88 0.99
Note:
Upper triangle: Between-person correlations;
Lower triangle: Within-person correlations;
*** p < .001, ** p < .01, * p < .05
# calculate correlations and descriptives
# Same multilevel correlation matrix as above, restricted to beeps with
# an outgroup interaction ("Yes").
studentOutMlCor <-
  MlCorMat(
    data = studentInteractionType %>% filter(OutgroupInteraction == "Yes"),
    id = "PID",
    selection = c("KeyNeedFullfillment", "Competence", "Autonomy", "Relatedness", "quality_overall", "AttitudesDutch"),
    labels = c("Core Need", "Competence", "Autonomy", "Relatedness", "Quality", "Attitudes NL")
  ) 

# Render the outgroup-only multilevel correlation matrix.
studentOutMlCor %>%
  kable(
    .,
    caption = "Student: Multilevel Core Variable Descriptives (Outgroup Contact Only)",
    format = "html",
    #booktabs = TRUE,
    linesep = linesep(c(ncol(.))),
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  # BUG FIX: the row-group indices previously referenced `studentMlCor`
  # (the full-sample matrix) instead of the outgroup-only matrix being
  # rendered here; correct dimensions are taken from `studentOutMlCor`.
  pack_rows("Correlations", 1, ncol(studentOutMlCor)) %>%
  pack_rows("Descriptives", ncol(studentOutMlCor) + 1, nrow(studentOutMlCor)) %>%
  footnote(
    general = c(
      "Upper triangle: Between-person correlations;",
      "Lower triangle: Within-person correlations;",
      "*** p < .001, ** p < .01,  * p < .05"
    )
  ) %>%
  kable_classic(full_width = F, 
                lightable_options = "hover", 
                html_font = "Cambria")
Table 19: Student: Multilevel Core Variable Descriptives (Outgroup Contact Only)
Core Need Competence Autonomy Relatedness Quality Attitudes NL
Correlations
Core Need 0.44** 0.49*** -0.22 0.51*** 0.14
Competence 0.23*** 0.58*** 0.40** 0.55*** 0.21
Autonomy 0.12*** 0.24*** 0.22 0.61*** 0.05
Relatedness 0.48*** 0.31*** 0.38*** 0.35* -0.09
Quality 0.19*** 0.40*** 0.16*** 0.15*** 0.16
Attitudes NL 0.32*** 0.36*** 0.19*** 0.23*** 0.30***
Descriptives
Grand Mean 86.86 73.23 78.58 60.30 78.80 70.41
Between SD 11.20 13.95 14.07 17.35 10.71 17.13
Within SD 15.87 16.81 14.24 26.14 17.88 9.87
ICC(1) 0.14 0.27 0.40 0.19 0.14 0.72
ICC(2) 0.58 0.76 0.85 0.67 0.59 0.96
Note:
Upper triangle: Between-person correlations;
Lower triangle: Within-person correlations;
*** p < .001, ** p < .01, * p < .05

Contact Hypothesis

We again test the most general contact hypothesis in two steps. First, we assess whether more intergroup interactions are related to more positive outgroup attitudes. Second, we test whether a potential positive effect on outgroup attitudes depends on the interaction quality (jointly with the number of interactions).

Interaction Frequency and Attitudes

To test the impact of the overall number of interactions, we hope to find a significant relationship between the number of interactions a participant had and the average outgroup attitude.

\[\begin{equation} \tag{19} r_{ContactFrequency, OutgroupAttitudes} \neq 0 \end{equation}\]

# correlation panel
# FIX: corrected the label typo "Numer" -> "Number".
pairs.panels.new(
  studentContactFreq %>% select(SumContactNL, SumContactNLAll, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)",
    "Sum:\nNumber of Outgroup Contacts (NL)",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

# correlation panel with interaction sums winsorized
# FIX: corrected the label typo "Numer" -> "Number".
pairs.panels.new(
  studentContactFreq %>% select(WinSumContactNL, WinSumContactNLAll, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]",
    "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

We find that both the number of interactions and the number of measurement beeps with an interaction are significantly related with the average outgroup attitudes. This is to say that within our data, participants with more outgroup interactions did have significantly more positive outgroup attitudes. This is inconsistent with the results we found in the worker sample.

Outgroup Attitudes by Interaction Type

In a next step we take into account that having an interaction with an outgroup member does not happen in a social vacuum. Participants who indicated that they had an interaction with an outgroup member include measurement occasions during which someone either only had an interaction with an outgroup member as well as times during which a person also had interaction(s) with a non-Dutch person. Inversely, participants who indicated that they did not have an interaction with a Dutch person might either have had no interaction at all or had an interaction with a non-Dutch person. We thus consider all possible combinations and their independent influences on outgroup attitudes.

We first assess the impact of the different interaction types across all measurement points (lumping all beeps together).

\[\begin{equation} \tag{20} Attitude = OutgroupInteraction + NonOutgroupInteraction \end{equation}\]

# between participants interaction type
# Build a combined interaction-type label (e.g. "Out, NoIn") from the
# two yes/no dummies; unknown codes propagate as NA.
studentAttInteractionType <- studentInteractionType %>%
  select(
    PID,
    OutgroupInteraction,
    NonOutgroupInteraction,
    Attitude = AttitudesDutch
  ) %>%
  mutate(
    InteractionType = paste(
      case_when(
        OutgroupInteraction == "Yes" ~ "Out",
        OutgroupInteraction == "No" ~ "NoOut"
      ),
      case_when(
        NonOutgroupInteraction == "Yes" ~ "In",
        NonOutgroupInteraction == "No" ~ "NoIn"
      ),
      sep = ", "
    )
  )

# violin plot of attitudes by interaction type group
# White point = mean; white crossbar = median; black box = IQR.
# FIX: stat_summary(fun.y = ...) has been deprecated since ggplot2 3.3.0
# (it only warns and remaps); the current argument name is `fun`.
# Also replaced T/F shorthand with TRUE/FALSE.
ggplot(studentAttInteractionType,
       aes(y = Attitude, x = OutgroupInteraction,
           group = interaction(OutgroupInteraction, NonOutgroupInteraction),
           fill = NonOutgroupInteraction)) +
  geom_violin(trim = TRUE, width = .8, position = position_dodge(0.8)) +
  # narrow black box marks the interquartile range
  geom_boxplot(width = 0.04,
               position = position_dodge(.8),
               outlier.colour = NULL, 
               outlier.shape = NA, 
               notch = FALSE, fill = "black", 
               lwd = 1, color = "black") + 
  # white point marks the mean
  stat_summary(fun = mean, 
               geom = "point", 
               shape = 16, 
               size = 1, 
               position = position_dodge(0.8),
               colour = "white") +
  # white center line marks the median
  stat_summary(geom = "crossbar", 
               width = 0.04, 
               fatten = 0, 
               position = position_dodge(0.8),
               color = "white", 
               fun.data = function(x){ return(c(y = median(x), 
                                                ymin = median(x), 
                                                ymax = median(x))) }) +
  ylab("Outgroup Attitudes") +
  xlab("Outgroup Interaction") +
  ggtitle("Violin Plot:\nOutgroup Attitudes by Interaction Type") +
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c(
      "Very cold or unfavorable feelings 0°",
      "Quite cold and unfavorable feelings 15°",
      "Fairly cold and unfavorable feelings 30°",
      "A bit cold and unfavorable feelings 40°",
      "No feeling at all 50°",
      "A bit warm and favorable feelings 60°",
      "Fairly warm and favorable feelings 70° ",
      "Quite warm and favorable feelings 85° ",
      "Very warm and favorable feelings 100° "
    )
  ) +
  scale_fill_brewer(palette = "Pastel2", name = "Non-Outgroup Interaction") +
  theme_Publication() +
  theme(strip.background = element_rect(fill = "black", color = "black"),
        strip.text = element_text(colour = 'white', face = "bold"))


# between participants interaction type
# Cell means and 95% CIs of outgroup attitudes per combination of the
# two interaction dummies.
studentContactType <- studentInteractionType %>%
  group_by(OutgroupInteraction, NonOutgroupInteraction) %>%
  summarise(
    n = n(),
    AttitudeM = mean(AttitudesDutch, na.rm = TRUE),
    AttitudeSD = sd(AttitudesDutch, na.rm = TRUE)
  ) %>%
  mutate(
    AttitudeSE = AttitudeSD / sqrt(n),
    AttitudeLwr = AttitudeM - 1.96 * AttitudeSE,
    AttitudeUpr = AttitudeM + 1.96 * AttitudeSE
  ) %>%
  ungroup()

# plot bar chart (alternative with less information about actual data)
# Bar chart of mean attitudes per interaction-type cell.
# NOTE(review): ymin is set to the mean itself, so only the UPPER half
# of the 95% CI is drawn (a common convention for bar charts). If a full
# CI is intended, ymin should be AttitudeLwr -- confirm.
studentAttInteractionTypeBar <- ggplot(
  studentContactType,
  aes(
    y = AttitudeM,
    x = OutgroupInteraction,
    fill = NonOutgroupInteraction
  )
) +
  geom_bar(
    stat = "identity",
    color = "black",
    position = position_dodge()
  ) +
  geom_errorbar(aes(ymin = AttitudeM, ymax = AttitudeUpr),
    width = .2,
    position = position_dodge(.9)
  ) +
  labs(
    fill = "Non-Outgroup Interaction",
    x = "Outgroup Interaction",
    y = "Outgroup Attitude",
    title = "Outgroup Attitudes by Interaction Type [95% CI]"
  ) +
  scale_fill_grey(
    start = 0.2,
    end = 0.8
  ) +
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c(
      "Very cold or unfavorable feelings 0°",
      "Quite cold and unfavorable feelings 15°",
      "Fairly cold and unfavorable feelings 30°",
      "A bit cold and unfavorable feelings 40°",
      "No feeling at all 50°",
      "A bit warm and favorable feelings 60°",
      "Fairly warm and favorable feelings 70° ",
      "Quite warm and favorable feelings 85° ",
      "Very warm and favorable feelings 100° "
    )
  ) +
  theme_Publication()
# create list to store student models
mdlStudent <- list()

# regression
# OLS of attitudes on the two interaction-type dummies plus their
# interaction (all beeps pooled; nesting within PID is ignored here).
mdlStudent$lmAttInt <- lm(
  AttitudesDutch ~ OutgroupInteraction * NonOutgroupInteraction,
  data = studentInteractionType
)
# summary(lmstudentAttInteraction)

summ(mdlStudent$lmAttInt, confint = TRUE, digits = 3, center = TRUE)
Observations 4965
Dependent variable AttitudesDutch
Type OLS linear regression
F(3,4961) 37.435
0.022
Adj. R² 0.022
Est. 2.5% 97.5% t val. p
(Intercept) 66.807 65.815 67.799 132.053 0.000
OutgroupInteraction 8.159 5.699 10.619 6.503 0.000
NonOutgroupInteraction -1.226 -2.529 0.077 -1.845 0.065
OutgroupInteraction:NonOutgroupInteraction -0.355 -3.439 2.730 -0.225 0.822
Standard errors: OLS; Continuous predictors are mean-centered.

We find that while controlling for interactions with non-Dutch people, outgroup attitudes were significantly higher when participants had an interaction with a Dutch person. The effect is of a medium size (8.16 points on a 0–100 scale). However, this analysis lumps all ESM beeps from every participants together and ignores that the data is nested within participants.

Interaction Frequency and Interaction Quality

In a final step we check whether the effect of outgroup interactions, in part, depends on the quality during the interactions. Because we can only assess interaction quality when there is an interaction, it is difficult to assess this with the interaction dummy as a within person predictor. Instead, we will use an aggregate measure of contact frequency and the average interaction quality to consider the two predictors jointly.

\[\begin{equation} \tag{21} Attitude = ContactFreq \times AverageContactQual \end{equation}\]

# correlation panel
# FIX: corrected the label typo "Numer" -> "Number".
pairs.panels.new(
  studentContactFreq %>% select(SumContactNL, SumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)",
    "Sum:\nNumber of Outgroup Contacts (NL)",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

# correlation panel with interaction sums winsorized
# FIX: corrected the label typo "Numer" -> "Number".
pairs.panels.new(
  studentContactFreq %>% select(WinSumContactNL, WinSumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]",
    "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

Within the data, we find no significant correlation between the participants’ Average Interaction Quality and their Average Outgroup Attitudes. Thus, within our data participants with higher quality outgroup interactions did not hold more positive attitudes towards that group. However, the frequency of intergroup interactions had a meaningful correlation with both the average interaction quality and the average outgroup attitudes.

# center remaining 
# Grand-mean center the person-level aggregates; "_c" suffix marks the
# centered versions.
studentContactFreq <-
  studentContactFreq %>%
  mutate(across(
    c(SumContactNL, SumContactNLAll, AvAttitude, AvQuality),
    ~ .x - mean(.x, na.rm = TRUE),
    .names = "{.col}_c"
  ))

# regression
# Moderated regression: contact frequency x average interaction quality
# (both grand-mean centered) predicting average outgroup attitudes.
mdlStudent$lmAttFreqQualX <- lm(
  AvAttitude ~ SumContactNL_c * AvQuality_c,
  data = studentContactFreq
)

summ(mdlStudent$lmAttFreqQualX, confint = TRUE, digits = 3, center = FALSE)
Observations 113
Dependent variable AvAttitude
Type OLS linear regression
F(3,109) 3.748
0.094
Adj. R² 0.069
Est. 2.5% 97.5% t val. p
(Intercept) 67.618 64.156 71.080 38.707 0.000
SumContactNL_c 0.805 0.320 1.290 3.291 0.001
AvQuality_c 0.253 -0.135 0.641 1.292 0.199
SumContactNL_c:AvQuality_c 0.023 -0.032 0.078 0.819 0.414
Standard errors: OLS
# Partial Eta Squared
# Effect sizes for the moderated regression terms.
mdlStudent$lmAttFreqQualXEta <-
  effectsize::eta_squared(mdlStudent$lmAttFreqQualX, partial = TRUE)


# Interaction Plots
# Simple-slopes plot of frequency at representative moderator values,
# with partial residuals overlaid.
interactions::interact_plot(
  mdlStudent$lmAttFreqQualX,
  pred = SumContactNL_c,
  modx = AvQuality_c,
  interval = TRUE,
  partial.residuals = TRUE
)

# Johnson-Neyman: moderator range over which the frequency slope is
# significant at alpha = .05.
interactions::johnson_neyman(mdlStudent$lmAttFreqQualX,
                             pred = SumContactNL_c,
                             modx = AvQuality_c,
                             alpha = .05)
## JOHNSON-NEYMAN INTERVAL 
## 
## When AvQuality_c is INSIDE the interval [-9.97, 16.41], the slope of SumContactNL_c is p < .05.
## 
## Note: The range of observed values of AvQuality_c is [-22.27, 16.23]

We find that in our student sample there is only a relationship between the number of outgroup contacts but no significant effect of average perceived contact quality. Nor do we find that in this sample the impact of the number of interactions is moderated by the average contact quality. This is not entirely consistent with the sojourner sample, where average contact quality did have a meaningful effect on outgroup attitudes. This effect is not necessarily surprising given that the variables aggregate all within person variation and there were substantially more measurements where participants did not have an interaction (but reported their outgroup attitudes) than measurements that followed an outgroup contact.

Multilevel Regression

We, thus, proceed with a multilevel analysis, which acknowledges that the measurements are nested within participants.

Unconditional model

We start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model).

\[\begin{equation} \tag{22} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (intercept-only) multilevel model of attitudes.
mdlStudent$lmerAttNull <-
  lme4::lmer(AttitudesDutch ~ 1 + (1 | PID),
    data = dtStudents$full
  ) # use optim if it does not converge

# Parallel nlme fit (needed for the gls comparison below).
# BUG FIX: the optimizer name was misspelled "nlmimb"; nlme's lmeControl
# only accepts "nlminb" or "optim" (see ?lmeControl).
mdlStudent$lmeAttNull <-
  lme(
    AttitudesDutch ~ 1,
    random = ~ 1 | PID,
    data = dtStudents$full,
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(mdlStudent$lmerAttNull) #or with the lme function
summ(mdlStudent$lmerAttNull, digits = 3)
Observations 4965
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 36740.926
BIC 36760.456
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.801
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 67.290 1.751 38.435 111.704 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 18.540
Residual 9.235
Grouping Variables
Group # groups ICC
PID 113 0.801
# Save the null-model variance components and derive the ICC.
# Element [1] of the VarCorr result is used as the PID intercept variance
# (tau_00^2) and element [2] as the residual variance (sigma^2).
mdlStudent$varAttNull <- 
  VarCorr(mdlStudent$lmeAttNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlStudent$tauAttNull <- 
  as.numeric(mdlStudent$varAttNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlStudent$sigmaAttNull <- 
  as.numeric(mdlStudent$varAttNull[2])
# The ICC estimate (between/between+within) is:
mdlStudent$IccAttNull <-
  (as.numeric(mdlStudent$varAttNull[1]) / (as.numeric(mdlStudent$varAttNull[1]) + as.numeric(mdlStudent$varAttNull[2])))
# Same ICC expressed as a percentage (reported in the text below).
mdlStudent$IccPercAttNull <-
  ((as.numeric(mdlStudent$varAttNull[1]) / (as.numeric(mdlStudent$varAttNull[1]) + as.numeric(mdlStudent$varAttNull[2])))) * 100

We then compare the random intercept model to a model without a random intercept (i.e., without levels at all).

# Single-level baseline (no random effects) fitted with gls() so the
# likelihood-ratio test against the random-intercept model is well defined.
mdlStudent$glsAttNull  <-
  gls(AttitudesDutch ~ 1,
      data = dtStudents$full,
      # FIX: optimizer was misspelled "nlmimb" (valid: "nlminb"/"optim")
      control = list(opt = "nlminb"))

# calculate Deviances manually: deviance = -2 * log-likelihood
mdlStudent$DevianceGlsNull <- logLik(mdlStudent$glsAttNull) * -2
mdlStudent$DevianceMlNull <- logLik(mdlStudent$lmeAttNull) * -2

# Compare the two null models (does the random intercept improve fit?):
anova(mdlStudent$glsAttNull,
      mdlStudent$lmeAttNull) %>% 
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; rownames_to_column() is the drop-in
  # replacement (identical behavior and column name).
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 20: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
glsAttNull 1 2 44352 44365 -22174
lmeAttNull 2 3 36741 36760 -18367 1 vs 2 7613.538 < .001

Comparing the two empty models, we find that there is indeed a significant amount of variance explained by including a random intercept.

To assess the variances within and between participants we look at the ICC \(\tau_{00}^2 / (\tau_{00}^2 + \sigma^2)\): The ratio of the between-cluster variance to the total variance is called the Intraclass Correlation. It tells you the proportion of the total variance in Y that is accounted for by the clustering. (In this case the clustering means clustering observations per participant).

We find that an estimated 80.12% of the variation in Feeling Thermometer scores is explained by between participant differences (clustering by PID). This is to say that 80.12% of the variance in any individual report of Attitudes towards the Dutch can be explained by the properties of the individual who provided the rating. And we find that including ‘participant’ as a predictor adds significantly to the model.

random intercept with predictors

To this random intercept model we now add the two types of interactions possible at each measurement point as contemporaneous predictors of outgroup attitudes. That is: We check whether within participants having an outgroup interaction (or a non-outgroup interaction) is associated with more positive outgroup attitudes at the same measurement point.

\[\begin{equation} \tag{23} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Random-intercept model with the two interaction types as level-1
# predictors (nlme fit, kept for the likelihood-ratio comparisons).
mdlStudent$lmeInterceptAttType <-
  lme(
    AttitudesDutch ~ OutgroupInteraction + NonOutgroupInteraction,
    random =  ~ 1 | PID,
    data = studentInteractionType
  )

# Equivalent lme4 fit; assigned first, then summarized with Satterthwaite
# p-values and 95% confidence intervals.
mdlStudent$lmerInterceptAttType <- lmer(
  AttitudesDutch ~ OutgroupInteraction + NonOutgroupInteraction + (1 | PID),
  data = studentInteractionType
)
summ(
  mdlStudent$lmerInterceptAttType,
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 4965
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 36684.392
BIC 36716.943
Pseudo-R² (fixed effects) 0.003
Pseudo-R² (total) 0.801
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 66.800 63.374 70.226 38.217 114.319 0.000
OutgroupInteraction 2.882 2.162 3.601 7.852 4862.093 0.000
NonOutgroupInteraction -0.133 -0.708 0.443 -0.453 4861.600 0.651
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 18.400
Residual 9.180
Grouping Variables
Group # groups ICC
PID 113 0.801
# 95%CI (Wald approximation)
mdlStudent$lmerInterceptAttTypeCI <- 
  confint(method = "Wald", mdlStudent$lmerInterceptAttType)

# Compare new model to previous step
anova(mdlStudent$lmeAttNull, 
      mdlStudent$lmeInterceptAttType) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; rownames_to_column() is the drop-in
  # replacement (identical behavior and column name).
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 21: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 36741 36760 -18367
lmeInterceptAttType 2 5 36684 36717 -18337 1 vs 2 60.533 < .001
# Save variances of the conditional model.
# NOTE(review): lme4::VarCorr() is called on an nlme fit here; S3 dispatch
# should reach nlme's VarCorr.lme method — confirm this is intentional.
mdlStudent$varInterceptAttType <- 
  lme4::VarCorr(mdlStudent$lmeInterceptAttType)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
mdlStudent$varBtwInterceptAttType <-
  1 - (as.numeric(mdlStudent$varInterceptAttType[1]) / as.numeric(mdlStudent$varAttNull[1]))
# Same quantity as a percentage (reported in the text below).
mdlStudent$varBtwPercInterceptAttType <-
  (1 - (as.numeric(mdlStudent$varInterceptAttType[1]) / as.numeric(mdlStudent$varAttNull[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
mdlStudent$varWithinInterceptAttType <-
  1 - (as.numeric(mdlStudent$varInterceptAttType[2]) / as.numeric(mdlStudent$varAttNull[2]))
mdlStudent$varWithinPercInterceptAttType <-
  (1 - (as.numeric(mdlStudent$varInterceptAttType[2]) / as.numeric(mdlStudent$varAttNull[2]))) * 100

We find that a random intercept model with the two interaction types as predictors explains significantly more variance then an empty random intercept model. Looking at the individual coefficients, we find that having an outgroup interaction is indeed associated with significantly more positive outgroup attitudes, while having an interaction with a non-Dutch person does not significantly relate to more positive or negative attitudes towards the Dutch.

TL;DR: Interaction with the Dutch is a great predictor; interactions with non-Dutch people are not.

random slope

In a next step, we check whether further letting the effect of the different interaction types vary between participants explains additional variance in outgroup attitudes (i.e., random slopes).

\[\begin{equation} \tag{24} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Random-slope model: both interaction-type effects are allowed to vary
# between participants (nlme fit for model comparison; "optim" optimizer
# required for convergence).
# Create and save Model (optimizer needed to reach convergence)
mdlStudent$lmeSlopesAttType <- lme(
  AttitudesDutch ~
    OutgroupInteraction + NonOutgroupInteraction,
  random = ~ 1 + OutgroupInteraction + NonOutgroupInteraction | PID,
  control = lmeControl(opt = "optim"),
  data = studentInteractionType
)

# Equivalent lme4 fit, assigned inside the summ() call.
# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlStudent$lmerSlopesAttType <- lmer(
    AttitudesDutch ~
      OutgroupInteraction + NonOutgroupInteraction +
      (1 + OutgroupInteraction + NonOutgroupInteraction | PID),
    data = studentInteractionType
  ), 
  confint = TRUE,
  digits = 3
)
Observations 4965
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 36476.495
BIC 36541.597
Pseudo-R² (fixed effects) 0.003
Pseudo-R² (total) 0.817
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 66.717 63.293 70.141 38.191 111.810 0.000
OutgroupInteractionYes 2.992 1.448 4.537 3.797 93.820 0.000
NonOutgroupInteractionYes 0.009 -0.763 0.781 0.022 108.800 0.982
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 18.375
PID OutgroupInteractionYes 6.996
PID NonOutgroupInteractionYes 2.689
Residual 8.801
Grouping Variables
Group # groups ICC
PID 113 0.813
# 95%CI (Wald approximation)
mdlStudent$lmerSlopesAttTypeCI <- 
  confint(method = "Wald", mdlStudent$lmerSlopesAttType)

# Compare new model to previous step
anova(mdlStudent$lmeAttNull,
      mdlStudent$lmeInterceptAttType, 
      mdlStudent$lmeSlopesAttType) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # FIX: add_rownames() is deprecated; rownames_to_column() is the drop-in
  # replacement (identical behavior and column name).
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 22: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 36741 36760 -18367
lmeInterceptAttType 2 5 36684 36717 -18337 1 vs 2 60.533 < .001
lmeSlopesAttType 3 10 36476 36542 -18228 2 vs 3 217.896 < .001
# Save variances
mdlStudent$varSlopesAttType <- 
  lme4::VarCorr(mdlStudent$lmeSlopesAttType)

# Assumption Checks: sjPlot returns a list of diagnostic plots
# (QQ of residuals, random-effect QQ per grouping factor, etc.);
# arrange four of them in one grid.
mdlStudent$diagSlopesAttType <-
  sjPlot::plot_model(mdlStudent$lmerSlopesAttType, type = "diag")
grid.arrange(
  mdlStudent$diagSlopesAttType[[1]],
  mdlStudent$diagSlopesAttType[[2]]$`PID`,
  mdlStudent$diagSlopesAttType[[3]],
  mdlStudent$diagSlopesAttType[[4]]
)

# Plot prediction model
# FIX: predict.lme() has no `re.form` argument (that is lme4 syntax); the
# stray argument was silently absorbed by `...`, so predictions were NOT at
# the population level as the comment-intent suggested. In nlme,
# `level = 0` requests fixed-effects-only (population-level) predictions.
mdlStudent$predSlopesAttType <- 
  studentInteractionType %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudent$lmeSlopesAttType,
                           studentInteractionType,
                           level = 0
                           )
         )

# Observed (black) vs. model-predicted (blue) attitudes over time,
# one facet per sampled participant.
(
  mdlStudent$predPltSlopesAttType <-
    ggplot(data = mdlStudent$predSlopesAttType %>% filter(PID %in% studentPltIDs), 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the figure.
ggsave(
  filename = "Figures/Student_PredictionPlot_SlopesAttType.png",
  mdlStudent$predPltSlopesAttType,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. This is different from the previous study, where the random slope did not add significantly.

TL;DR: Random slopes adds significantly.

Need Fulfillment

The main proposal of our article is that the success of an outgroup interaction might be explained by whether or not the interaction fulfilled the person’s core situational need. This should, in turn, be due to a higher perceived interaction quality. We will thus sequentially test whether the fulfillment of the core need during an interaction is (1) related to more positive outgroup attitudes, (2) related to higher perceived interaction quality, and (3) whether the variance explained by the core need is assumed by the perceived interaction quality when both are considered jointly.

Need fulfillment and Attitudes

In a first step we, thus, test the relationship between outgroup attitudes and the fulfillment of the core situational need during the interaction.

Unconditional model

We again start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model). Note that this unconditional model differs from the empty model created earlier because for this set of analyses we will only consider the subsample of measurement points during which an outgroup interaction was reported. This is necessary because measurements of needs during the interaction and perceived interaction quality are only meaningful within an interaction context.

# see how large our outgroup interaction subset actually is
# (cross-tab of outgroup x non-outgroup interaction, row percentages)
tbl_cross(
  studentInteractionType,
  row = OutgroupInteraction,
  col = NonOutgroupInteraction,
  percent = "row"
)
Characteristic NonOutgroupInteraction Total
No Yes
OutgroupInteraction
No 1,695 (42%) 2,335 (58%) 4,030 (100%)
Yes 329 (35%) 606 (65%) 935 (100%)
Total 2,024 (41%) 2,941 (59%) 4,965 (100%)
# create outgroup interaction subset: keep only measurement points at which
# an outgroup interaction was reported (needs/quality are only meaningful
# within an interaction context)
studentOutgroupInteraction <- studentInteractionType %>%
  filter(OutgroupInteraction == "Yes")

# create empty list to organize models: Att = attitude outcomes,
# Qlt = interaction-quality outcomes
mdlStudentOut <- 
  list(
    Att = list(),
    Qlt = list()
  )

\[\begin{equation} \tag{25} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Unconditional model within the outgroup-interaction subsample, fit with
# both lme4 (for summ()) and nlme (for likelihood-ratio comparisons).
mdlStudentOut$Att$lmerNull <-
  lme4::lmer(AttitudesDutch ~ 1 + (1 | PID), 
             data = studentOutgroupInteraction) # use optim if it does not converge

mdlStudentOut$Att$lmeNull <-
  lme(
    AttitudesDutch ~ 1,
    random = ~ 1 | PID,
    data = studentOutgroupInteraction,
    # FIX: optimizer was misspelled "nlmimb" (valid: "nlminb"/"optim")
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.ML.r) #or with the lme function
summ(mdlStudentOut$Att$lmerNull, digits = 3, center = TRUE)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7250.070
BIC 7264.591
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.724
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 70.736 1.609 43.954 102.806 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.042
Residual 9.898
Grouping Variables
Group # groups ICC
PID 108 0.724
# Save the null-model variance components and derive the ICC for the
# outgroup-interaction subsample ([1] = intercept variance tau_00^2,
# [2] = residual variance sigma^2, as in the full-sample code above).
mdlStudentOut$Att$varNull <- 
  VarCorr(mdlStudentOut$Att$lmeNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlStudentOut$Att$tauNull <- 
  as.numeric(mdlStudentOut$Att$varNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlStudentOut$Att$sigmaNull <- 
  as.numeric(mdlStudentOut$Att$varNull[2])
# The ICC estimate (between/between+within) is:
mdlStudentOut$Att$IccNull <-
  (as.numeric(mdlStudentOut$Att$varNull[1]) / (as.numeric(mdlStudentOut$Att$varNull[1]) + as.numeric(mdlStudentOut$Att$varNull[2])))
# Same ICC expressed as a percentage.
mdlStudentOut$Att$IccPercNull <-
  ((as.numeric(mdlStudentOut$Att$varNull[1]) / (as.numeric(mdlStudentOut$Att$varNull[1]) + as.numeric(mdlStudentOut$Att$varNull[2])))) * 100

random intercept with level one predictors

We then add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{26} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Random-intercept model: person-mean-centered core-need fulfillment (cwc)
# predicting outgroup attitudes (nlme fit, kept for model comparison).
mdlStudentOut$Att$lmeInterceptCore <-
  lme(
    AttitudesDutch ~ KeyNeedFullfillment_cwc,
    random = ~ 1 | PID,
    data = studentOutWithinBetween
  )

# Equivalent lme4 fit; assigned first, then summarized with Satterthwaite
# p-values and 95% confidence intervals.
mdlStudentOut$Att$lmerInterceptCore <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc + (1 | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerInterceptCore,
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7236.781
BIC 7256.143
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.731
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.728 67.573 73.884 43.931 102.865 0.000
KeyNeedFullfillment_cwc 0.092 0.053 0.131 4.642 822.859 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.065
Residual 9.777
Grouping Variables
Group # groups ICC
PID 108 0.730
# 95%CI (Wald approximation)
mdlStudentOut$Att$lmerInterceptCoreCI <- 
  confint(method = "Wald", mdlStudentOut$Att$lmerInterceptCore)

# Compare new model to previous step (likelihood-ratio test, rendered as
# an HTML table; NA cells blanked for display)
anova(mdlStudentOut$Att$lmeNull, 
      mdlStudentOut$Att$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 23: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeNull 1 3 7250 7265 -3622
mdlStudentOut$Att$lmeInterceptCore 2 4 7237 7256 -3614 1 vs 2 15.289 < .001
# Save variances of the conditional model.
# NOTE(review): lme4::VarCorr() called on an nlme fit; S3 dispatch should
# reach nlme's method — confirm this is intentional.
mdlStudentOut$Att$varInterceptCore <-
  lme4::VarCorr(mdlStudentOut$Att$lmeInterceptCore)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
mdlStudentOut$Att$varBtwInterceptCore <- 
  1 - (as.numeric(mdlStudentOut$Att$varInterceptCore[1]) / as.numeric(mdlStudentOut$Att$varNull[1]))
# Same quantity as a percentage.
mdlStudentOut$Att$varBtwPercInterceptCore <- 
  (1 - (as.numeric(mdlStudentOut$Att$varInterceptCore[1]) / as.numeric(mdlStudentOut$Att$varNull[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
mdlStudentOut$Att$varWithinInterceptCore <-
  1 - (as.numeric(mdlStudentOut$Att$varInterceptCore[2]) / as.numeric(mdlStudentOut$Att$varNull[2]))
mdlStudentOut$Att$varWithinPercInterceptCore <-
  (1 - (as.numeric(mdlStudentOut$Att$varInterceptCore[2]) / as.numeric(mdlStudentOut$Att$varNull[2]))) * 100

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model. Looking at the individual coefficients, we find that the situational core need relates significantly to outgroup attitudes. The core need has little explained variance between participants (compared to the null model: Variance Explained = 1 – (Var with Predictor/Var without Predictor); -0.29%). The variance explained within participants is small to medium (2.43%).

random slope

In a next step, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{27} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Random-slope model: the core-need effect may vary between participants.
# Create and save Model (optimizer needed to reach convergence)
mdlStudentOut$Att$lmeSlopesCore <-
  lme(
    AttitudesDutch ~
      KeyNeedFullfillment_cwc,
    random = ~ 1 + KeyNeedFullfillment_cwc | PID,
    control = lmeControl(opt = "optim"),
    data = studentOutWithinBetween
  )

# Equivalent lme4 fit, assigned inside the summ() call.
# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlStudentOut$Att$lmerSlopesCore <- lmer(
    AttitudesDutch ~
      KeyNeedFullfillment_cwc +
      (1 + KeyNeedFullfillment_cwc | PID),
    data = studentOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7222.092
BIC 7251.135
Pseudo-R² (fixed effects) 0.012
Pseudo-R² (total) 0.750
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.710 67.552 73.868 43.883 102.993 0.000
KeyNeedFullfillment_cwc 0.127 0.068 0.186 4.201 44.956 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.118
PID KeyNeedFullfillment_cwc 0.146
Residual 9.478
Grouping Variables
Group # groups ICC
PID 108 0.743
# Sensitivity check: same random-slope model with outcome and predictor
# standardized within participants (zwc suffix).
# all variables standardized within PPT
summ(
  mdlStudentOut$Att$lmerSlopesCoreZ <- lmer(
    AttitudesDutch_zwc ~
      KeyNeedFullfillment_zwc +
      (1 + KeyNeedFullfillment_zwc | PID),
    data = studentOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 836
Dependent variable AttitudesDutch_zwc
Type Mixed effects linear regression
AIC 2286.487
BIC 2314.859
Pseudo-R² (fixed effects) 0.019
Pseudo-R² (total) 0.053
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 0.000 -0.063 0.063 0.000 770.875 1.000
KeyNeedFullfillment_zwc 0.138 0.056 0.220 3.301 58.109 0.002
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 0.000
PID KeyNeedFullfillment_zwc 0.185
Residual 0.926
Grouping Variables
Group # groups ICC
PID 85 0.000
# standardized coefficients (helper defined earlier in the document;
# returns coefficients on the standardized scale with their SEs)
stdCoef.merMod(mdlStudentOut$Att$lmerSlopesCore)
##                         stdcoef   stdse
## (Intercept)               0.000 0.00000
## KeyNeedFullfillment_cwc   0.115 0.02737
# 95%CIs (Wald approximation)
mdlStudentOut$Att$lmerSlopesCoreCI <- 
  confint(method = "Wald", mdlStudentOut$Att$lmerSlopesCore)

# Attempts at R^2 (variance decomposition per Rights & Sterba)
r2mlm::r2mlm(mdlStudentOut$Att$lmerSlopesCore, bargraph = TRUE)

## $Decompositions
##                 total              within             between
## fixed, within   0.011622870055738  0.0419528760238586 NA     
## fixed, between  0                  NA                 0      
## slope variation 0.0154453115745091 0.055750020307192  NA     
## mean variation  0.722954153390388  NA                 1      
## sigma2          0.249977664979365  0.902297103668949  NA     
## 
## $R2s
##     total              within             between
## f1  0.011622870055738  0.0419528760238586 NA     
## f2  0                  NA                 0      
## v   0.0154453115745091 0.055750020307192  NA     
## m   0.722954153390388  NA                 1      
## f   0.011622870055738  NA                 NA     
## fv  0.0270681816302471 0.0977028963310506 NA     
## fvm 0.750022335020635  NA                 NA
# Alternative R^2 measures for the same model, from three packages
# (results shown in the ## output below):
mitml::multilevelR2(mdlStudentOut$Att$lmerSlopesCore)
##       RB1       RB2        SB       MVP 
##  0.083183 -0.009516  0.016046  0.011623
performance::r2(mdlStudentOut$Att$lmerSlopesCore)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.750
##      Marginal R2: 0.012
performance::model_performance(mdlStudentOut$Att$lmerSlopesCore)
## # Indices of model performance
## 
## AIC      |      BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## ---------------------------------------------------------------------
## 7222.092 | 7251.135 |      0.750 |      0.012 | 0.747 | 8.829 | 9.478
# Side-by-side fit indices for the three lme4 models fitted so far.
performance::compare_performance(mdlStudentOut$Att$lmerNull, 
                                 mdlStudentOut$Att$lmerInterceptCore, 
                                 mdlStudentOut$Att$lmerSlopesCore)
## # Comparison of Model Performance Indices
## 
## Name    |   Model |      AIC |      BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## -----------------------------------------------------------------------------------------
## Model 1 | lmerMod | 7250.070 | 7264.591 |      0.724 |      0.000 | 0.724 | 9.357 | 9.898
## Model 2 | lmerMod | 7236.781 | 7256.143 |      0.731 |      0.006 | 0.730 | 9.236 | 9.777
## Model 3 | lmerMod | 7222.092 | 7251.135 |      0.750 |      0.012 | 0.747 | 8.829 | 9.478
# Compare new model to previous step (three nested models via
# likelihood-ratio tests, rendered as an HTML table)
anova(mdlStudentOut$Att$lmeNull, 
      mdlStudentOut$Att$lmeInterceptCore, 
      mdlStudentOut$Att$lmeSlopesCore)  %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 24: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeNull 1 3 7250 7265 -3622
mdlStudentOut$Att$lmeInterceptCore 2 4 7237 7256 -3614 1 vs 2 15.289 < .001
mdlStudentOut$Att$lmeSlopesCore 3 6 7222 7251 -3605 2 vs 3 18.681 < .001
# Save variances
mdlStudentOut$Att$varSlopesCore <- 
  lme4::VarCorr(mdlStudentOut$Att$lmeSlopesCore)

# Assumption Checks: arrange four sjPlot diagnostic plots in one grid
# (the second element is a per-grouping-factor list, hence $`PID`).
mdlStudentOut$Att$diagSlopesCore <- 
  sjPlot::plot_model(mdlStudentOut$Att$lmerSlopesCore, type = "diag")
grid.arrange(
  mdlStudentOut$Att$diagSlopesCore[[1]],
  mdlStudentOut$Att$diagSlopesCore[[2]]$`PID`,
  mdlStudentOut$Att$diagSlopesCore[[3]],
  mdlStudentOut$Att$diagSlopesCore[[4]]
)

# Plot prediction model
# FIX: predict.lme() has no `re.form` argument (that is lme4 syntax); the
# stray argument was silently absorbed by `...`, so predictions were NOT at
# the population level as the comment-intent suggested. In nlme,
# `level = 0` requests fixed-effects-only (population-level) predictions.
mdlStudentOut$Att$predSlopesCore <- 
  studentOutWithinBetween %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudentOut$Att$lmeSlopesCore,
                           studentOutWithinBetween,
                           level = 0
                           )
         )

# Observed (black) vs. model-predicted (blue) attitudes over time,
# one facet per sampled participant.
(
  mdlStudentOut$Att$predPltSlopesCore <-
    ggplot(data = mdlStudentOut$Att$predSlopesCore %>% filter(PID %in% studentOutPltIDs), 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the figure.
ggsave(
  filename = "Figures/StudentOut_PredictionPlot_SlopesAttCore.png",
  mdlStudentOut$Att$predPltSlopesCore,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that the core need remains a strong predictor (even when letting the influence vary between participants).

TL;DR: The random slope adds significantly to the prediction model.

Need fulfillment and Interaction Quality

Based on the assertion that the relationship between core need fulfillment and outgroup attitudes is explained by a higher perceived interaction quality, the core need fulfillment should also significantly predict perceived interaction quality.

Unconditional model

Given that we now have the perceived interaction quality as our outcome variable of interest we again begin with an unconditional model (i.e., empty random intercept model), to see whether there is enough variance to explain within the participants. Similarly to before this is again done within the subsample of measurements during which an outgroup interaction was reported.

\[\begin{equation} \tag{28} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Unconditional model for perceived interaction quality (same subsample),
# fit with lme4 (for summ()) and nlme (for likelihood-ratio comparisons).
mdlStudentOut$Qlt$lmerNull <-
  lme4::lmer(quality_overall ~ 1 + (1 | PID), 
             data = studentOutWithinBetween) # use optim if it does not converge
mdlStudentOut$Qlt$lmeNull <-
  lme(
    quality_overall ~ 1,
    random = ~ 1 | PID,
    data = studentOutWithinBetween,
    # FIX: optimizer was misspelled "nlmimb" (valid: "nlminb"/"optim")
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.Qual.ML.r) #or with the lme function
summ(mdlStudentOut$Qlt$lmerNull, digits = 3, center = TRUE)
Observations 935
Dependent variable quality_overall
Type Mixed effects linear regression
AIC 8179.693
BIC 8194.215
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.144
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 78.849 1.017 77.536 100.065 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 7.506
Residual 18.318
Grouping Variables
Group # groups ICC
PID 108 0.144
# Save the quality null-model variance components and derive the ICC
# ([1] = intercept variance tau_00^2, [2] = residual variance sigma^2).
mdlStudentOut$Qlt$varNull <- 
  VarCorr(mdlStudentOut$Qlt$lmeNull) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlStudentOut$Qlt$tauNull <- 
  as.numeric(mdlStudentOut$Qlt$varNull[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlStudentOut$Qlt$sigmaNull <- 
  as.numeric(mdlStudentOut$Qlt$varNull[2])
# The ICC estimate (between/between+within) is:
mdlStudentOut$Qlt$IccNull <-
  (as.numeric(mdlStudentOut$Qlt$varNull[1]) / (as.numeric(mdlStudentOut$Qlt$varNull[1]) + as.numeric(mdlStudentOut$Qlt$varNull[2])))
# Same ICC expressed as a percentage.
mdlStudentOut$Qlt$IccPercNull <-
  ((as.numeric(mdlStudentOut$Qlt$varNull[1]) / (as.numeric(mdlStudentOut$Qlt$varNull[1]) + as.numeric(mdlStudentOut$Qlt$varNull[2])))) * 100

We again find a reasonable amount of variance within the participants.

random intercept with level one predictor

We again add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{29} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Random-intercept model: person-mean-centered core-need fulfillment
# predicting perceived interaction quality.
# Create and save Model
mdlStudentOut$Qlt$lmeInterceptCore <-
  lme(
    quality_overall ~ KeyNeedFullfillment_cwc,
    random = ~ 1 | PID,
    data = studentOutWithinBetween
  )

# Equivalent lme4 fit, assigned inside the summ() call.
# Get summary with p-values (Satterthwaite's method)
summ(
  mdlStudentOut$Qlt$lmerInterceptCore <- 
    lmer(quality_overall ~ KeyNeedFullfillment_cwc + (1 | PID), 
       data = studentOutWithinBetween),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
)
Observations 935
Dependent variable quality_overall
Type Mixed effects linear regression
AIC 8102.427
BIC 8121.790
Pseudo-R² (fixed effects) 0.073
Pseudo-R² (total) 0.225
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 78.851 76.864 80.838 77.784 100.436 0.000
KeyNeedFullfillment_cwc 0.332 0.263 0.401 9.399 844.010 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 7.721
Residual 17.440
Grouping Variables
Group # groups ICC
PID 108 0.164
# 95%CI (Wald approximation)
mdlStudentOut$Qlt$lmerInterceptCoreCI <- 
  confint(method = "Wald", mdlStudentOut$Qlt$lmerInterceptCore)

# Compare new model to previous step (likelihood-ratio test, rendered as
# an HTML table; NA cells blanked for display)
anova(mdlStudentOut$Qlt$lmeNull, 
      mdlStudentOut$Qlt$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 25: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Qlt$lmeNull 1 3 8180 8194 -4087
mdlStudentOut$Qlt$lmeInterceptCore 2 4 8102 8122 -4047 1 vs 2 79.266 < .001
# Save variances of the conditional quality model.
# NOTE(review): lme4::VarCorr() called on an nlme fit; S3 dispatch should
# reach nlme's method — confirm this is intentional.
mdlStudentOut$Qlt$varInterceptCore <-
  lme4::VarCorr(mdlStudentOut$Qlt$lmeInterceptCore)

# The estimate of between-group (or Intercept variance) explained:
# Variance Explained = 1 – (Var with Predictor/Var without Predictor)
mdlStudentOut$Qlt$varBtwInterceptCore <- 
  1 - (as.numeric(mdlStudentOut$Qlt$varInterceptCore[1]) / as.numeric(mdlStudentOut$Qlt$varNull[1]))
# Same quantity as a percentage.
mdlStudentOut$Qlt$varBtwPercInterceptCore <- 
  (1 - (as.numeric(mdlStudentOut$Qlt$varInterceptCore[1]) / as.numeric(mdlStudentOut$Qlt$varNull[1]))) * 100
# and the estimate of within-group (or residual variance) explained is:
mdlStudentOut$Qlt$varWithinInterceptCore <-
  1 - (as.numeric(mdlStudentOut$Qlt$varInterceptCore[2]) / as.numeric(mdlStudentOut$Qlt$varNull[2]))
mdlStudentOut$Qlt$varWithinPercInterceptCore <-
  (1 - (as.numeric(mdlStudentOut$Qlt$varInterceptCore[2]) / as.numeric(mdlStudentOut$Qlt$varNull[2]))) * 100

The predictor again adds a significant amount of explained variances beyond the empty model and looking at the slope coefficient, we find that the situational core need fulfillment relates significantly to perceived interaction quality. The core need did not explain variance between participants (-5.82%; the negative value indicates that the between-person variance estimate slightly increased when adding the predictor). The variance explained within participants is medium (9.35%).

random slope

As before, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{30} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Random-slope model: let the core-need effect on interaction quality vary
# across participants ("optim" optimizer needed to reach convergence).
mdlStudentOut$Qlt$lmeSlopesCore <- lme(
  fixed = quality_overall ~ KeyNeedFullfillment_cwc,
  random = ~ 1 + KeyNeedFullfillment_cwc | PID,
  control = lmeControl(opt = "optim"),
  data = studentOutWithinBetween
)

# Parallel lme4 fit (also kept for plotting);
# summ() reports Satterthwaite p-values and 95% CIs.
mdlStudentOut$Qlt$lmerSlopesCore <- lmer(
  quality_overall ~ KeyNeedFullfillment_cwc +
    (1 + KeyNeedFullfillment_cwc | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Qlt$lmerSlopesCore,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable quality_overall
Type Mixed effects linear regression
AIC 8072.820
BIC 8101.863
Pseudo-R² (fixed effects) 0.102
Pseudo-R² (total) 0.331
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 78.842 76.858 80.825 77.897 100.477 0.000
KeyNeedFullfillment_cwc 0.402 0.280 0.524 6.458 36.171 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 7.937
PID KeyNeedFullfillment_cwc 0.346
Residual 16.604
Grouping Variables
Group # groups ICC
PID 108 0.186
# Wald 95% CIs for the fixed effects of the lme4 fit.
mdlStudentOut$Qlt$lmerSlopesCoreCI <-
  confint(mdlStudentOut$Qlt$lmerSlopesCore, method = "Wald")

# Likelihood-ratio comparison: null vs. random intercept vs. random slope.
anova(mdlStudentOut$Qlt$lmeNull,
      mdlStudentOut$Qlt$lmeInterceptCore,
      mdlStudentOut$Qlt$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report exact p to three decimals, or "< .001" below that threshold.
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # was the unsafe abbreviation `T`
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 26: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Qlt$lmeNull 1 3 8180 8194 -4087
mdlStudentOut$Qlt$lmeInterceptCore 2 4 8102 8122 -4047 1 vs 2 79.266 < .001
mdlStudentOut$Qlt$lmeSlopesCore 3 6 8073 8102 -4030 2 vs 3 33.595 < .001
# Save variances
# Variance components of the random-slope model (nlme fit).
mdlStudentOut$Qlt$varSlopesCore <- 
  lme4::VarCorr(mdlStudentOut$Qlt$lmeSlopesCore)

# Assumption Checks:
# sjPlot returns a list of diagnostic ggplots; the PID element of the second
# panel holds the random-effect diagnostics for the grouping factor.
# Arrange the four panels in one grid for visual inspection.
mdlStudentOut$Qlt$diagSlopesCore <-
  sjPlot::plot_model(mdlStudentOut$Qlt$lmerSlopesCore, type = "diag")
grid.arrange(
  mdlStudentOut$Qlt$diagSlopesCore[[1]],
  mdlStudentOut$Qlt$diagSlopesCore[[2]]$`PID`,
  mdlStudentOut$Qlt$diagSlopesCore[[3]],
  mdlStudentOut$Qlt$diagSlopesCore[[4]]
)

# Plot prediction model.
# NOTE(review): the outcome of lmeSlopesCore is quality_overall (interaction
# quality), so the observed line must plot quality_overall — the original
# carried AttitudesDutch (and its axis label) over from the attitude models.
# Also, predict.lme() has no `re.form` argument (that is lme4 syntax; unknown
# arguments are silently swallowed by `...`); population-level
# (fixed-effects-only) predictions are requested via `level = 0`.
mdlStudentOut$Qlt$predSlopesCore <- 
  studentOutWithinBetween %>%
  select(quality_overall, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudentOut$Qlt$lmeSlopesCore,
                           studentOutWithinBetween,
                           level = 0
                           )
         )

(
  mdlStudentOut$Qlt$predPltSlopesCore <-
    ggplot(data = mdlStudentOut$Qlt$predSlopesCore %>% filter(PID %in% studentOutPltIDs), 
           aes(x = TIDnum, y = measure)) +
    # Blue line: model-implied (fixed-effects) trajectory.
    geom_line(alpha = 1, color = "blue") +
    # Black line: observed interaction quality.
    geom_line(aes(y = quality_overall), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Interaction Quality") +
    theme_Publication()
)

ggsave(
  filename = "Figures/StudentOut_PredictionPlot_SlopesCore.png",
  mdlStudentOut$Qlt$predPltSlopesCore,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model.

Interaction Needs, Quality, and Attitudes

In our final main step, we will jointly consider the effect of core need fulfillment and perceived interaction quality on outgroup attitudes. We expect that if the relationship between core need fulfillment and outgroup attitudes is indeed explained by a higher perceived interaction quality, the interaction quality perception should assume the explained variance of the core contact need fulfillment.

random intercept with level one predictors

We thus add both the core need fulfillment and perceived interaction quality to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{31} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Random-intercept model: core need fulfillment and perceived interaction
# quality jointly predicting outgroup attitudes.
mdlStudentOut$Att$lmeInterceptCoreQlt <- lme(
  fixed = AttitudesDutch ~ KeyNeedFullfillment_cwc + quality_overall_cwc,
  random = ~ 1 | PID,
  data = studentOutWithinBetween
)

# Parallel lme4 fit; summ() reports Satterthwaite p-values and 95% CIs.
mdlStudentOut$Att$lmerInterceptCoreQlt <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc + quality_overall_cwc + (1 | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerInterceptCoreQlt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7182.599
BIC 7206.802
Pseudo-R² (fixed effects) 0.023
Pseudo-R² (total) 0.752
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.707 67.547 73.867 43.862 103.042 0.000
KeyNeedFullfillment_cwc 0.042 0.003 0.081 2.090 821.903 0.037
quality_overall_cwc 0.151 0.114 0.187 8.040 821.903 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.130
Residual 9.420
Grouping Variables
Group # groups ICC
PID 108 0.746
# Wald 95% CIs for the fixed effects of the lme4 fit.
mdlStudentOut$Att$lmerInterceptCoreQltCI <-
  confint(mdlStudentOut$Att$lmerInterceptCoreQlt, method = "Wald")

# Likelihood-ratio comparison against the unconditional (null) model.
anova(
  mdlStudentOut$Att$lmeNull,
  mdlStudentOut$Att$lmeInterceptCoreQlt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report exact p to three decimals, or "< .001" below that threshold.
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # was the unsafe abbreviation `T`
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 27: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeNull 1 3 7250 7265 -3622
mdlStudentOut$Att$lmeInterceptCoreQlt 2 5 7183 7207 -3586 1 vs 2 71.471 < .001
# Save variance components of the updated model.
mdlStudentOut$Att$varInterceptCoreQlt <-
  lme4::VarCorr(mdlStudentOut$Att$lmeInterceptCoreQlt)

# Proportion of variance explained relative to the null model:
# 1 - (variance with predictors / variance without predictors).
# Index [1] = between-person (intercept) variance, [2] = residual variance.
mdlStudentOut$Att$varBtwInterceptCoreQlt <- 
  1 - (as.numeric(mdlStudentOut$Att$varInterceptCoreQlt[1]) / as.numeric(mdlStudentOut$Att$varNull[1]))
# Percentage version reuses the proportion instead of repeating the formula.
mdlStudentOut$Att$varBtwPercInterceptCoreQlt <- 
  mdlStudentOut$Att$varBtwInterceptCoreQlt * 100
# and the estimate of within-group (or residual variance) explained is:
mdlStudentOut$Att$varWithinInterceptCoreQlt <-
  1 - (as.numeric(mdlStudentOut$Att$varInterceptCoreQlt[2]) / as.numeric(mdlStudentOut$Att$varNull[2]))
mdlStudentOut$Att$varWithinPercInterceptCoreQlt <-
  mdlStudentOut$Att$varWithinInterceptCoreQlt * 100

Unsurprisingly, the model with the two predictors adds significantly beyond the empty unconditional model. However, more importantly, looking at the coefficients, we find that the effect of core need fulfillment is indeed strongly reduced and its variance is instead explained by the perceived interaction quality. The variance explained in outgroup attitudes is of medium effect size (between: -1.14%, within: 9.44%).

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{32} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Random-slope model: both predictors' effects may vary across participants
# ("optim" optimizer needed to reach convergence).
mdlStudentOut$Att$lmeSlopesCoreQlt <- lme(
  fixed = AttitudesDutch ~ KeyNeedFullfillment_cwc + quality_overall_cwc,
  random = ~ 1 + KeyNeedFullfillment_cwc + quality_overall_cwc | PID,
  control = lmeControl(opt = "optim"),
  data = studentOutWithinBetween
)

# Parallel lme4 fit (also kept for plotting);
# summ() reports Satterthwaite p-values and 95% CIs.
mdlStudentOut$Att$lmerSlopesCoreQlt <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc + quality_overall_cwc +
    (1 + KeyNeedFullfillment_cwc + quality_overall_cwc | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerSlopesCoreQlt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7144.325
BIC 7192.731
Pseudo-R² (fixed effects) 0.028
Pseudo-R² (total) 0.789
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.670 67.503 73.836 43.744 103.376 0.000
KeyNeedFullfillment_cwc 0.028 -0.011 0.067 1.427 777.312 0.154
quality_overall_cwc 0.175 0.112 0.237 5.473 48.349 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.251
PID KeyNeedFullfillment_cwc 0.007
PID quality_overall_cwc 0.206
Residual 8.762
Grouping Variables
Group # groups ICC
PID 108 0.775
# Wald 95% CIs for the fixed effects of the lme4 fit.
mdlStudentOut$Att$lmerSlopesCoreQltCI <-
  confint(mdlStudentOut$Att$lmerSlopesCoreQlt, method = "Wald")

# Likelihood-ratio comparison: null vs. random intercept vs. random slope.
anova(
  mdlStudentOut$Att$lmeNull,
  mdlStudentOut$Att$lmeInterceptCoreQlt,
  mdlStudentOut$Att$lmeSlopesCoreQlt
) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report exact p to three decimals, or "< .001" below that threshold.
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # was the unsafe abbreviation `T`
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 28: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeNull 1 3 7250 7265 -3622
mdlStudentOut$Att$lmeInterceptCoreQlt 2 5 7183 7207 -3586 1 vs 2 71.471 < .001
mdlStudentOut$Att$lmeSlopesCoreQlt 3 10 7142 7190 -3561 2 vs 3 50.911 < .001
# Save variances
# Variance components of the random-slope model (nlme fit).
mdlStudentOut$Att$varSlopesCoreQlt <- 
  lme4::VarCorr(mdlStudentOut$Att$lmeSlopesCoreQlt)

# Assumption Checks:
# sjPlot returns a list of diagnostic ggplots; the PID element of the second
# panel holds the random-effect diagnostics for the grouping factor.
# Arrange the four panels in one grid for visual inspection.
mdlStudentOut$Att$diagSlopesCoreQlt <- 
  sjPlot::plot_model(mdlStudentOut$Att$lmerSlopesCoreQlt, type = "diag")
grid.arrange(
  mdlStudentOut$Att$diagSlopesCoreQlt[[1]],
  mdlStudentOut$Att$diagSlopesCoreQlt[[2]]$`PID`,
  mdlStudentOut$Att$diagSlopesCoreQlt[[3]],
  mdlStudentOut$Att$diagSlopesCoreQlt[[4]]
)

# Plot prediction model.
# NOTE(review): predict.lme() has no `re.form` argument (that is lme4 syntax;
# unknown arguments are silently swallowed by `...`), so the original call
# returned subject-level predictions. `level = 0` gives the intended
# population-level (fixed-effects-only) predictions.
mdlStudentOut$Att$predSlopesCoreQlt <- 
  studentOutWithinBetween %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudentOut$Att$lmeSlopesCoreQlt,
                           studentOutWithinBetween,
                           level = 0
                           )
         )

(
  mdlStudentOut$Att$predPltSlopesCoreQlt <-
    ggplot(data = mdlStudentOut$Att$predSlopesCoreQlt %>% filter(PID %in% studentOutPltIDs), 
           aes(x = TIDnum, y = measure)) +
    # Blue line: model-implied trajectory; black line: observed attitudes.
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/StudentOut_PredictionPlot_SlopesAttCoreQlt.png",
  mdlStudentOut$Att$predPltSlopesCoreQlt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that the perceived interaction quality remains a strong predictor (even when letting the slopes vary between participants).

Check for robustness

To build further confidence in our results, we assess a few additional models that might offer alternative explanations of the effects we find.

Interaction Type

To make certain that the effect of core need fulfillment is specific to the interaction, we compare the effect to the fulfillment of the situational core need when no outgroup interaction took place.

random intercept

Here we go back to the full dataset and add generalized situational core need fulfillment (either during an interaction or about the daytime in general) and whether an outgroup interaction happened as well as their interaction term to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{33} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \end{split} \end{equation}\]

# Robustness check (full dataset): core need fulfillment crossed with whether
# an outgroup interaction took place, random-intercept specification.
mdlStudent$lmeInterceptAttCoreInt <- lme(
  fixed = AttitudesDutch ~ KeyNeedFullfillment_cwc * OutgroupInteraction,
  random = ~ 1 | PID,
  data = studentWithinBetween
)

# Parallel lme4 fit; summ() reports Satterthwaite p-values and 95% CIs.
mdlStudent$lmerInterceptAttCoreInt <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc * OutgroupInteraction + (1 | PID),
  data = studentWithinBetween
)
summ(
  mdlStudent$lmerInterceptAttCoreInt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 4965
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 36660.520
BIC 36699.581
Pseudo-R² (fixed effects) 0.005
Pseudo-R² (total) 0.803
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 64.160 60.644 67.675 35.770 126.453 0.000
KeyNeedFullfillment_cwc -0.057 -0.101 -0.012 -2.500 4851.889 0.012
OutgroupInteraction 2.592 1.870 3.314 7.041 4861.115 0.000
KeyNeedFullfillment_cwc:OutgroupInteraction 0.077 0.040 0.114 4.083 4852.193 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 18.413
Residual 9.144
Grouping Variables
Group # groups ICC
PID 113 0.802
# Wald 95% CIs for the fixed effects of the lme4 fit.
mdlStudent$lmerInterceptAttCoreIntCI <-
  confint(mdlStudent$lmerInterceptAttCoreInt, method = "Wald")

# Likelihood-ratio comparison against the unconditional (null) model.
anova(mdlStudent$lmeAttNull,
      mdlStudent$lmeInterceptAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report exact p to three decimals, or "< .001" below that threshold.
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # add_rownames() is deprecated in dplyr; rownames_to_column() replaces it.
  tibble::rownames_to_column(var = "Description") %>%
  # Strip the list-path prefix (everything up to the last `$`) so only the
  # model name remains in the table.
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # was the unsafe abbreviation `T`
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 29: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 36741 36760 -18367
lmeInterceptAttCoreInt 2 6 36661 36700 -18324 1 vs 2 86.405 < .001
# Save variance components of the updated model.
mdlStudent$varInterceptAttCoreInt <- 
  lme4::VarCorr(mdlStudent$lmeInterceptAttCoreInt)

# Proportion of variance explained relative to the null model:
# 1 - (variance with predictors / variance without predictors).
# Index [1] = between-person (intercept) variance, [2] = residual variance.
mdlStudent$varBtwInterceptAttCoreInt <-
  1 - (as.numeric(mdlStudent$varInterceptAttCoreInt[1]) / as.numeric(mdlStudent$varAttNull[1]))
# Percentage version reuses the proportion instead of repeating the formula.
mdlStudent$varBtwPercInterceptAttCoreInt <-
  mdlStudent$varBtwInterceptAttCoreInt * 100
# and the estimate of within-group (or residual variance) explained is:
mdlStudent$varWithinInterceptAttCoreInt <-
  1 - (as.numeric(mdlStudent$varInterceptAttCoreInt[2]) / as.numeric(mdlStudent$varAttNull[2]))
mdlStudent$varWithinPercInterceptAttCoreInt <-
  mdlStudent$varWithinInterceptAttCoreInt * 100

We find that the model explains significantly more variance than the empty null model. However, more interestingly, looking at the coefficients, we find that, as seen earlier, having an outgroup interaction has a strong effect on outgroup attitudes. Importantly, we find that there is a main effect of key need fulfillment but also a significant interaction effect of core need fulfillment and outgroup contact. This indicates that it is not simply key need fulfillment in general — but especially key need fulfillment during an outgroup contact that predicts more positive outgroup attitudes.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{34} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \end{split} \end{equation}\]

# Random-slope version of the robustness model: the main effects may vary
# across participants (the interaction term stays fixed);
# "optim" optimizer needed to reach convergence.
mdlStudent$lmeSlopesAttCoreInt <- lme(
  fixed = AttitudesDutch ~ KeyNeedFullfillment_cwc * OutgroupInteraction,
  random = ~ 1 + KeyNeedFullfillment_cwc + OutgroupInteraction | PID,
  control = lmeControl(opt = "optim"),
  data = studentWithinBetween
)

# Parallel lme4 fit (also kept for plotting);
# summ() reports Satterthwaite p-values and 95% CIs.
mdlStudent$lmerSlopesAttCoreInt <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc * OutgroupInteraction +
    (1 + KeyNeedFullfillment_cwc + OutgroupInteraction | PID),
  data = studentWithinBetween
)
summ(
  mdlStudent$lmerSlopesAttCoreInt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 4965
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 36474.616
BIC 36546.228
Pseudo-R² (fixed effects) 0.005
Pseudo-R² (total) 0.818
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 63.900 59.558 68.241 28.849 108.630 0.000
KeyNeedFullfillment_cwc -0.033 -0.081 0.016 -1.330 651.941 0.184
OutgroupInteraction 2.878 1.359 4.397 3.713 94.287 0.000
KeyNeedFullfillment_cwc:OutgroupInteraction 0.061 0.021 0.100 3.025 1317.462 0.003
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 22.881
PID KeyNeedFullfillment_cwc 0.055
PID OutgroupInteraction 6.843
Residual 8.794
Grouping Variables
Group # groups ICC
PID 113 0.871
# Wald 95% CIs for the fixed effects of the lme4 fit.
mdlStudent$lmerSlopesAttCoreIntCI <-
  confint(mdlStudent$lmerSlopesAttCoreInt, method = "Wald")

# Likelihood-ratio comparison: null vs. random intercept vs. random slope.
anova(mdlStudent$lmeAttNull,
      mdlStudent$lmeInterceptAttCoreInt,
      mdlStudent$lmeSlopesAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report exact p to three decimals, or "< .001" below that threshold.
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # add_rownames() is deprecated in dplyr; rownames_to_column() replaces it.
  tibble::rownames_to_column(var = "Description") %>%
  # Strip the list-path prefix (everything up to the last `$`) so only the
  # model name remains in the table.
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,
    align = rep("l", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 30: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNull 1 3 36741 36760 -18367
lmeInterceptAttCoreInt 2 6 36661 36700 -18324 1 vs 2 86.405 < .001
lmeSlopesAttCoreInt 3 11 36475 36546 -18226 2 vs 3 195.901 < .001
# Save variances
# Variance components of the random-slope robustness model (nlme fit).
mdlStudent$varSlopesAttCoreInt <- 
  lme4::VarCorr(mdlStudent$lmeSlopesAttCoreInt)

# Assumption Checks:
# sjPlot returns a list of diagnostic ggplots; the PID element of the second
# panel holds the random-effect diagnostics for the grouping factor.
# Arrange the four panels in one grid for visual inspection.
mdlStudent$diagSlopesAttCoreInt <-
  sjPlot::plot_model(mdlStudent$lmerSlopesAttCoreInt, type = "diag")
grid.arrange(
  mdlStudent$diagSlopesAttCoreInt[[1]],
  mdlStudent$diagSlopesAttCoreInt[[2]]$`PID`,
  mdlStudent$diagSlopesAttCoreInt[[3]],
  mdlStudent$diagSlopesAttCoreInt[[4]]
)

# Plot prediction model.
# NOTE(review): predict.lme() has no `re.form` argument (that is lme4 syntax;
# unknown arguments are silently swallowed by `...`), so the original call
# returned subject-level predictions. `level = 0` gives the intended
# population-level (fixed-effects-only) predictions.
mdlStudent$predSlopesAttCoreInt <- 
  studentWithinBetween %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudent$lmeSlopesAttCoreInt,
                           studentWithinBetween,
                           level = 0
                           )
         )

(
  mdlStudent$predPltSlopesAttCoreInt <-
    ggplot(data = mdlStudent$predSlopesAttCoreInt %>% filter(PID %in% studentPltIDs), 
           aes(x = TIDnum, y = measure)) +
    # Blue line: model-implied trajectory; black line: observed attitudes.
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/Student_PredictionPlot_SlopesAttCoreInt.png",
  mdlStudent$predPltSlopesAttCoreInt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also see that when taking the possibility of varying slopes into account, the coefficient interpretations remain consistent (i.e., outgroup contact and its interaction with core need fulfillment remain important predictors of positive outgroup attitudes).

Plot Interaction

Before we move on, we shortly illustrate the interaction effect of how the effect of core need fulfillment differed by whether an outgroup contact took place or not. To this end we illustrate (1) the raw data points (without taking the nested nature into account), as well as (2) a plot of the model predicted values and their prediction interval (taking the nested structure into account; similar to an interaction plot).

# visualize interaction
## Without ML structure: naive per-group lm smooths over the pooled raw
## data, ignoring the nesting of observations within participants.
ggplot(data = studentInteractionType,
       aes(x = KeyNeedFullfillment,
           y = AttitudesDutch,
           fill = OutgroupInteraction)) +
  #geom_point()+
  geom_smooth(method = 'lm',
              aes(linetype = OutgroupInteraction),
              color = "black") +
  #facet_wrap(~PID, ncol = 6)+
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  #scale_colour_manual(values=c("grey20", "black"), name="Intergroup Contact")+
  scale_y_continuous(
    limits = c(50, 100),
    breaks = seq(50, 100, by = 10),
    position = "left"
  ) +
  scale_x_continuous(limits = c(0, 100), breaks = seq(0, 100, by = 10)) +
  labs(
    # Fixed typo in the displayed title: "stucture" -> "structure".
    title = "Without ML structure",
    x = "Fulfillment Core Need",
    y = "Outgroup Attitudes",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact"
  ) +
  theme_Publication() +
  theme(legend.position = "bottom",
        legend.key.size = unit(1, "cm"))

## With ML structure
# create parameters for prediction
# Grid of core-need values from the rounded data minimum to maximum in steps
# of 5, duplicated so each value appears once per contact condition.
# PID = 0 is a placeholder id not present in the data — TODO confirm how
# merTools::predictInterval handles unseen grouping levels here.
datNew = data.frame(
  KeyNeedFullfillment_cwc = rep(seq(
    round_any(min(
      studentWithinBetween$KeyNeedFullfillment_cwc
    ), 5, f = floor), round_any(max(
      studentWithinBetween$KeyNeedFullfillment_cwc
    ), 5, f = ceiling), 5
  ), 2),
  PID = 0
) %>%
  # First half of rows = 1, second half = 2; assumes OutgroupInteraction is
  # coded 1 = no contact, 2 = contact — TODO confirm against the model data.
  mutate(OutgroupInteraction = rep(c(1, 2), each = nrow(.)/2)) %>%
  select(PID, OutgroupInteraction, KeyNeedFullfillment_cwc)


# Predict values, clean up and calculate SE
# Approximate 95% interval for the model predictions on datNew.
PI <-
  merTools::predictInterval(
    merMod = mdlStudent$lmerSlopesAttCoreInt,
    newdata = datNew,
    level = 0.95,
    stat = "mean",
    type = "linear.prediction",
    # Was the unsafe abbreviation `F`; interval for the expected value,
    # not for new observations.
    include.resid.var = FALSE,
    fix.intercept.variance = FALSE
  )
mdlStudent$predInterceptAttCoreIntX <- 
  cbind(datNew, PI)
# Back out a standard error from the upper bound of the 95% interval.
mdlStudent$predInterceptAttCoreIntX$se <-
  (mdlStudent$predInterceptAttCoreIntX$upr - mdlStudent$predInterceptAttCoreIntX$fit) / 1.96
rm(datNew, PI)
# Factor labels for plotting; assumes ascending codes map to
# "No" then "Yes" contact — TODO confirm coding.
mdlStudent$predInterceptAttCoreIntX$OutgroupInteractionLab <-
  factor(
    x = mdlStudent$predInterceptAttCoreIntX$OutgroupInteraction,
    levels = sort(
      unique(mdlStudent$predInterceptAttCoreIntX$OutgroupInteraction)
    ),
    labels = c("No", "Yes")
  )


# Plot predicted values with SE
# Model-based interaction plot: predicted attitudes by core-need fulfillment,
# one line per contact condition, with a +/- 1 SE ribbon.
# NOTE(review): `size` in geom_line is deprecated since ggplot2 3.4 in favor
# of `linewidth` — still works, consider updating.
ggplot(
  mdlStudent$predInterceptAttCoreIntX,
  aes(x = KeyNeedFullfillment_cwc,
      y = fit,
      fill = OutgroupInteractionLab)
)+
  #geom_point() +
  geom_line(aes(linetype = OutgroupInteractionLab), size = 1) +
  #facet_wrap(~PID, ncol = 6)+
  # Ribbon spans fit +/- one standard error (derived from the 95% interval).
  geom_ribbon(data = mdlStudent$predInterceptAttCoreIntX,
              aes(ymin = fit - se, ymax = fit + se),
              alpha = 0.3) +
  # X breaks span the observed (rounded) range of the centered predictor.
  scale_x_continuous(breaks = seq(
    round_any(min(
      studentWithinBetween$KeyNeedFullfillment_cwc
    ), 5, f = floor), round_any(max(
      studentWithinBetween$KeyNeedFullfillment_cwc
    ), 5, f = ceiling), 10
  )) +
  scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  labs(
    x = "Fulfillment Core Need",
    y = "Outgroup Attitude (NL)",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact",
    title = "Based on Model Predictions"
  ) +
  theme_Publication()

# #### Bayesian estimation !! ONLY RUN ON FINAL RENDER !! Takes forever ####
# options(mc.cores = parallel::detectCores())  # Run many chains simultaneously
# brmfit <- brm(
#   AttitudesDutch ~ KeyNeedFullfillment_cwc * OutgroupInteraction +
#     (1 + KeyNeedFullfillment_cwc + OutgroupInteraction | PID),
#   data = studentWithinBetween,
#   family = gaussian,
#   iter = 1000,
#   chains = 4
# )
# 
# # create parameters for prediction:
# datNew = data.frame(
#   KeyNeedFullfillment_cwc = rep(seq(
#     round_any(min(
#       studentWithinBetween$KeyNeedFullfillment_cwc
#     ), 2, f = floor), round_any(max(
#       studentWithinBetween$KeyNeedFullfillment_cwc
#     ), 2, f = ceiling), 2
#   ), 2)
# ) %>%
#   mutate(OutgroupInteraction = rep(c(1, 2), each = nrow(.)/2))
# 
# # Save predicted values and adjust names and labels
# fitavg <-
#   cbind(datNew,
#         fitted(brmfit, newdata = datNew, re_formula = NA)[, -2])
# names(fitavg)[names(fitavg) == "Estimate"] = "pred"
# fitavg$se <- (fitavg$Q97.5 - fitavg$pred) / 1.96
# fitavg$OutgroupInteractionLab <-
#   factor(
#     x = fitavg$OutgroupInteraction,
#     levels = sort(
#       unique(fitavg$OutgroupInteraction)
#     ),
#     labels = c("No", "Yes")
#   )
# 
# # Plot Bayesian SE prediction interval
# ggplot(fitavg,
#        aes(x = KeyNeedFullfillment_cwc,
#            y = pred,
#            fill = OutgroupInteractionLab)) +
#   scale_x_continuous(breaks = seq(
#     round_any(min(
#       studentWithinBetween$KeyNeedFullfillment_cwc
#     ), 5, f = floor), round_any(max(
#       studentWithinBetween$KeyNeedFullfillment_cwc
#     ), 5, f = ceiling), 10
#   )) +
#   scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
#   geom_line(aes(linetype = OutgroupInteractionLab), size = 1) +
#   geom_ribbon(aes(ymin = pred - se, ymax = pred + se), alpha = 0.2) +
#   scale_linetype_manual(values = c("dashed", "solid")) +
#   scale_fill_manual(values = c("darkgrey", "black")) +
#   labs(
#     x = "Fulfillment Core Need",
#     y = "Outgroup Attitude (NL)",
#     fill = "Intergroup Contact",
#     linetype = "Intergroup Contact",
#     title = "Based on Bayesian Prediction Interval"
#   ) +
#   theme_Publication()
# 
# # # plot all overlayed posteriors:
# # pst <- posterior_samples(brmfit, "b")
# # ggplot(studentWithinBetween,
# #        aes(x = KeyNeedFullfillment_cwc, y = AttitudesDutch)) +
# #   geom_point(shape = 4, alpha = .1) +
# #   geom_tile() +
# #   geom_abline(
# #     data = pst,
# #     aes(intercept = b_Intercept, slope = b_KeyNeedFullfillment_cwc),
# #     alpha = .025,
# #     size = .4
# #   ) +
# #   labs(title = "slope Posteriors",
# #        x = "Fulfillment Core Need",
# #        y = "Outgroup Attitudes (NL)") +
# #   theme_Publication()
# # rm(datNew, brmfit, fitavg, pst)

The plots indicate that especially once we take the nested data structure into account we can see a substantially stronger effect of core need fulfillment on outgroup attitudes during outgroup contacts than without outgroup contacts.

Other psychological needs

In a final step we check whether during the interaction the core situational need is a meaningful predictor even when taking other fundamental psychological needs into account. We focus on the three commonly considered self determination needs: competence, autonomy, and relatedness.

random intercept with level one predictors

We add the core need fulfillment with the three self determination needs to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{35} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \\ &\ \beta_{4i} = \gamma_{40} \end{split} \end{equation}\]

# Random-intercept model: core need plus the three self-determination needs
# predicting outgroup attitudes (na.exclude keeps predictions aligned).
mdlStudentOut$Att$lmeInterceptCoreSdt <- lme(
  fixed = AttitudesDutch ~ KeyNeedFullfillment_cwc + Competence_cwc +
    Autonomy_cwc + Relatedness_cwc,
  random = ~ 1 | PID,
  data = studentOutWithinBetween,
  na.action = na.exclude
)

# Parallel lme4 fit; summ() reports Satterthwaite p-values and 95% CIs.
mdlStudentOut$Att$lmerInterceptCoreSdt <- lmer(
  AttitudesDutch ~ KeyNeedFullfillment_cwc + Competence_cwc +
    Autonomy_cwc + Relatedness_cwc + (1 | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerInterceptCoreSdt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7214.101
BIC 7247.985
Pseudo-R² (fixed effects) 0.019
Pseudo-R² (total) 0.746
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.713 67.554 73.871 43.881 102.992 0.000
KeyNeedFullfillment_cwc 0.059 0.020 0.098 2.955 819.889 0.003
Competence_cwc 0.054 0.012 0.097 2.488 819.889 0.013
Autonomy_cwc 0.033 -0.017 0.083 1.306 819.889 0.192
Relatedness_cwc 0.060 0.034 0.087 4.540 819.889 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.114
Residual 9.519
Grouping Variables
Group # groups ICC
PID 108 0.741
# 95%CI
# Wald confidence intervals for all parameters of the lme4 fit
# (fast approximation; no profiling or bootstrapping).
mdlStudentOut$Att$lmerInterceptCoreSdtCI <- 
  confint(method = "Wald", mdlStudentOut$Att$lmerInterceptCoreSdt)

# Baseline for comparison: the same random-intercept model with only the three
# self-determination needs (no core-need term).
mdlStudentOut$Att$lmeInterceptSdt <- lme(
  fixed = AttitudesDutch ~ Competence_cwc + Autonomy_cwc + Relatedness_cwc,
  data = studentOutWithinBetween,
  random = ~ 1 | PID,
  na.action = na.exclude
)

# lme4 refit of the SDT-only model for the summ() output.
# Note: center = TRUE here, so summ() mean-centers the continuous predictors.
mdlStudentOut$Att$lmerInterceptSdt <- lmer(
  AttitudesDutch ~ Competence_cwc + Autonomy_cwc + Relatedness_cwc + (1 | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerInterceptSdt,
  confint = TRUE,
  digits = 3,
  center = TRUE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7214.803
BIC 7243.846
Pseudo-R² (fixed effects) 0.017
Pseudo-R² (total) 0.744
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.715 67.558 73.873 43.889 102.970 0.000
Competence_cwc 0.065 0.023 0.108 2.999 820.883 0.003
Autonomy_cwc 0.033 -0.017 0.083 1.297 820.883 0.195
Relatedness_cwc 0.065 0.039 0.091 4.895 820.883 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.106
Residual 9.564
Grouping Variables
Group # groups ICC
PID 108 0.739
# Compare new model to previous step: null -> SDT-only -> SDT + core need,
# via likelihood-ratio tests on the nested nlme fits.
anova(
  mdlStudentOut$Att$lmeNull,
  mdlStudentOut$Att$lmeInterceptSdt,
  mdlStudentOut$Att$lmeInterceptCoreSdt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report p to 3 decimals; censor very small values as "< .001"
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  # blank out NA cells (the first model has no predecessor to test against)
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE instead of the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 31: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeNull 1 3 7250 7265 -3622
mdlStudentOut$Att$lmeInterceptSdt 2 6 7215 7244 -3601 1 vs 2 41.267 < .001
mdlStudentOut$Att$lmeInterceptCoreSdt 3 7 7214 7248 -3600 2 vs 3 2.702 0.1
# Save variances
# Store the variance components (random-intercept SD and residual SD) of the
# core + SDT model for later reporting.
mdlStudentOut$Att$varInterceptCoreSdt <-
  lme4::VarCorr(mdlStudentOut$Att$lmeInterceptCoreSdt)

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model and we find that adding the core need adds further explained variance — beyond the self determination needs. Looking at the individual coefficients, we find that the situational core need relates significantly to outgroup attitudes, that it is a stronger predictor than any of the self determination theory needs and that it assumes some of the variance explained by the self determination theory needs (when compared to a model without the core need).

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{36} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \\ &\ \beta_{4i} = \gamma_{40} + u_{4i} \end{split} \end{equation}\]

# Reduced dataset if no variance in responses.
# Participants whose responses are constant on any key variable (SD == 0)
# cannot contribute to random slopes, so their rows are dropped here.
studentRed <-
  studentOutgroupInteraction %>%
  group_by(PID) %>%
  filter(
    sd(AttitudesDutch) != 0 &
      sd(KeyNeedFullfillment) != 0 &
      sd(Competence) != 0 &
      sd(Autonomy) != 0 &
      sd(RelatednessInteraction) != 0
  ) %>%
  ungroup()

# Per-participant correlations among the outcome and all predictors.
# Participants with a perfect correlation (|r| = 1) on any pair are excluded,
# because such collinearity prevents the random-slope model from converging.
studentRedCor <- 
  studentOutgroupInteraction %>%
  group_by(PID) %>%
  summarise(
    rAttCore = cor(AttitudesDutch, KeyNeedFullfillment),
    rAttComp = cor(AttitudesDutch, Competence),
    rAttAut = cor(AttitudesDutch, Autonomy),
    rAttRel = cor(AttitudesDutch, RelatednessInteraction),
    rCoreComp = cor(KeyNeedFullfillment, Competence),
    rCoreAut = cor(KeyNeedFullfillment, Autonomy),
    rCoreRel = cor(KeyNeedFullfillment, RelatednessInteraction),
    rCompAut = cor(Competence, Autonomy),
    rCompRel = cor(Competence, RelatednessInteraction),
    rAutRel = cor(Autonomy, RelatednessInteraction)
  ) %>%
  # compare numerically -- the original compared abs(.) against the string "1",
  # which only worked through implicit character coercion
  filter_at(vars(-PID), all_vars(abs(.) != 1))

# Keep only the measurement rows of participants who survived the
# perfect-correlation screen above.
studentRed2 <- studentOutgroupInteraction %>%
  filter(PID %in% unique(studentRedCor$PID))

# Create and save Model (optimizer needed to reach convergence).
# Problem: some PPTs have 100 on all measures (SD = 0) AND/OR
# for some PPTs all pairwise correlations are 1 or -1.
# Removing these PPTs or their measurement beeps doesn't help.
# However, removing either the Core Need, Autonomy, or Relatedness from the
# random slopes lets the model converge.
# FOR NOW: Autonomy removed from the random slopes because it is the weakest predictor.
# NOTE(review): unlike the intercept models above, no na.action is set here,
# so lme() falls back to its default (na.fail) -- confirm the data used here
# contain no missing values.
mdlStudentOut$Att$lmeSlopesCoreSdt <-
  nlme::lme(
    AttitudesDutch ~
      KeyNeedFullfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc,
    random = ~ 1 + KeyNeedFullfillment_cwc + Competence_cwc + Relatedness_cwc | PID, # Autonomy_cwc + 
    control = lmeControl(opt = "optim", maxIter = 100, msMaxIter = 100),
    data = studentOutWithinBetween
  )

# lme4 refit with the full random-slope structure (Autonomy included, which
# lmer tolerates here). Stored for plotting, then summarized with
# Satterthwaite-based p-values and Wald CIs.
mdlStudentOut$Att$lmerSlopesCoreSdt <- lmer(
  AttitudesDutch ~
    KeyNeedFullfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc +
    (1 + KeyNeedFullfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc | PID),
  data = studentOutWithinBetween
)
summ(
  mdlStudentOut$Att$lmerSlopesCoreSdt,
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 935
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 7218.933
BIC 7320.585
Pseudo-R² (fixed effects) 0.023
Pseudo-R² (total) 0.771
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.689 67.526 73.852 43.804 103.194 0.000
KeyNeedFullfillment_cwc 0.079 0.025 0.133 2.886 40.058 0.006
Competence_cwc 0.056 0.011 0.100 2.455 103.227 0.016
Autonomy_cwc 0.026 -0.025 0.076 0.994 146.860 0.322
Relatedness_cwc 0.067 0.035 0.098 4.172 41.043 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 16.192
PID KeyNeedFullfillment_cwc 0.119
PID Competence_cwc 0.042
PID Autonomy_cwc 0.027
PID Relatedness_cwc 0.068
Residual 9.091
Grouping Variables
Group # groups ICC
PID 108 0.760
# 95%CI
# Wald confidence intervals for the random-slope lme4 fit.
mdlStudentOut$Att$lmerSlopesCoreSdtCI <- 
  confint(method = "Wald", mdlStudentOut$Att$lmerSlopesCoreSdt)

# Compare new model to previous step: SDT-only intercepts -> + core need ->
# random slopes, via likelihood-ratio tests on the nested nlme fits.
anova(mdlStudentOut$Att$lmeInterceptSdt,
      mdlStudentOut$Att$lmeInterceptCoreSdt,
      mdlStudentOut$Att$lmeSlopesCoreSdt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report p to 3 decimals; censor very small values as "< .001"
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Student: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE instead of the reassignable shorthand T
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 32: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlStudentOut$Att$lmeInterceptSdt 1 6 7215 7244 -3601
mdlStudentOut$Att$lmeInterceptCoreSdt 2 7 7214 7248 -3600 1 vs 2 2.702 0.1
mdlStudentOut$Att$lmeSlopesCoreSdt 3 16 7212 7289 -3590 2 vs 3 20.407 0.016
# Save variances
# Variance components of the random-slope model (intercept SD, slope SDs, residual SD).
mdlStudentOut$Att$varSlopesCoreSdt <- 
  lme4::VarCorr(mdlStudentOut$Att$lmeSlopesCoreSdt)

# Assumption Checks:
# sjPlot returns a list of diagnostic plots for the lmer fit; the random-effect
# diagnostics (element 2) are nested by grouping factor, hence the $`PID` access.
mdlStudentOut$Att$diagSlopesCoreSdt <- 
  sjPlot::plot_model(mdlStudentOut$Att$lmerSlopesCoreSdt, type = "diag")
# Arrange the four diagnostic panels in one figure.
grid.arrange(
  mdlStudentOut$Att$diagSlopesCoreSdt[[1]],
  mdlStudentOut$Att$diagSlopesCoreSdt[[2]]$`PID`,
  mdlStudentOut$Att$diagSlopesCoreSdt[[3]],
  mdlStudentOut$Att$diagSlopesCoreSdt[[4]]
)

# Plot prediction model
# Build a data frame of observed attitudes plus model-implied values for plotting.
# NOTE(review): predict.lme() (nlme) has no re.form argument -- that is lme4
# syntax. Here re.form = NA is swallowed by "..." and predictions default to
# the innermost grouping level (subject-specific), not population-level.
# Confirm whether level = 0 was intended.
mdlStudentOut$Att$predSlopesCoreSdt <- 
  studentOutWithinBetween %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlStudentOut$Att$lmeSlopesCoreSdt,
                           studentOutWithinBetween,
                           re.form = NA
                           )
         )

# Plot observed trajectories (black) against model-implied trajectories (blue)
# for the participant subset in studentOutPltIDs; wrapping the assignment in
# parentheses also prints the plot.
(
  mdlStudentOut$Att$predPltSlopesCoreSdt <-
    ggplot(data = mdlStudentOut$Att$predSlopesCoreSdt %>% filter(PID %in% studentOutPltIDs),
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +  # model-implied values
    geom_line(aes(y = AttitudesDutch), alpha = 1) +  # observed values
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the prediction plot as a high-resolution PNG for the manuscript.
ggsave(
  "Figures/StudentOut_PredictionPlot_SlopesAttCoreStd.png",
  plot = mdlStudentOut$Att$predPltSlopesCoreSdt,
  device = "png",
  units = "cm",
  width = 18,
  height = 12,
  dpi = 800
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also see that when taking the possibility of varying slopes into account, the coefficient interpretations remain consistent (i.e., core need and relatedness remain the strongest and only significant predictors).

Young Medical Professional Sample

Data Description

Participants

# extract demographic information from eligibility questionnaire
# (restricted to sessions that appear in the final daily dataset)
medicalDemographicSupp <- dtMedical$raw.eligibility %>%
  filter(session %in% dtMedical$full$session) %>%
  select(session, nationality, studentBachMa)
# summarize participant characteristics

# One row per participant: measurement counts plus demographics.
# NOTE(review): age/gender/nationality are returned unaggregated inside
# summarise(), producing one row per observation per PID (deprecated behavior
# in recent dplyr); distinct() then collapses the duplicates. Consider
# first() instead -- confirm these values are constant within PID.
medicalSampleInfo <-
  merge(dtMedical$full, medicalDemographicSupp, by = "session") %>%
  mutate(gender = as.factor(ifelse(.$Gender == 1, "women", ifelse(.$Gender == 2, "man", ifelse(.$Gender == 3, "other", NA))))) %>%
  group_by(PID) %>%
  summarise(
    dailiesN = n(), 
    morningN = sum(periodMA=="morning"),
    afternoonN = sum(periodMA=="afternoon"),
    age = age,
    gender = gender,
    nationality = nationality
  ) %>%
  distinct

# look at frequencies of characteristics
# Summary table of sample characteristics, categorical levels sorted by frequency.
medicalSampleInfo %>%
  ungroup() %>%
  select(
    "Number of Measurements" = dailiesN,
    Age = age,
    Gender = gender,
    Nationality = nationality
  ) %>%
  mutate(Nationality = as.character(Nationality)) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 71¹
Number of Measurements 61 (50, 66)
Age 22 (20, 25)
Gender
women 59 (83%)
man 12 (17%)
Nationality
Germany 19 (27%)
Italy 8 (11%)
Greece 7 (9.9%)
Romania 5 (7.0%)
Sweden 3 (4.2%)
Brazil 2 (2.8%)
India 2 (2.8%)
Moldova 2 (2.8%)
Saudi Arabia 2 (2.8%)
Slovakia 2 (2.8%)
South Africa 2 (2.8%)
United Kingdom 2 (2.8%)
Bulgaria 1 (1.4%)
Cyprus 1 (1.4%)
Estonia 1 (1.4%)
Finland 1 (1.4%)
France 1 (1.4%)
Iran 1 (1.4%)
Ireland 1 (1.4%)
Japan 1 (1.4%)
Macedonia 1 (1.4%)
Poland 1 (1.4%)
Portugal 1 (1.4%)
Russia 1 (1.4%)
Sri Lanka 1 (1.4%)
United States 1 (1.4%)
Zimbabwe 1 (1.4%)

1 Median (IQR); n (%)

Interactions

# duration of survey should include median and MAD
# Survey duration per daily questionnaire.
medicalInteractions <- dtMedical$full %>%
  dplyr::select(created.daily, ended.daily) %>%
  mutate_all(ymd_hms) %>%
  # Request seconds explicitly: plain "ended - created" lets difftime()
  # auto-select its unit (secs/mins/hours) from the data, while the table
  # below is labeled "in seconds".
  mutate(duration = as.numeric(difftime(ended.daily, created.daily, units = "secs"))) %>%
  select(duration)

# Descriptive table for survey duration; trimmed/winsorized statistics guard
# against the heavy right skew of duration data.
# Fixed: the original referenced the student-sample objects (dtStudents$full,
# studentInteractions) and labeled the caption "Study 2" inside the Study 3
# (medical) section.
medicalInteractions %>%
  as.data.frame() %>%
  psych::describe(., trim = .2) %>%
  as.data.frame() %>%
  mutate(vars = c("Duration [in seconds]"), # rownames(.),
         # number of beeps for which duration is unknown
         na = nrow(dtMedical$full) - n,
         win.mean = sapply(medicalInteractions, psych::winsor.mean, simplify = TRUE),
         win.sd = sapply(medicalInteractions, psych::winsor.sd, simplify = TRUE)) %>%
  dplyr::select(characteristic = vars, n, na, 
                mean, `mean win` = win.mean, `mean trim` = trimmed, median,
                sd, `sd win` = win.sd, MAD = mad, min, max,
                skew, kurtosis) %>%
  kbl(., 
      #label = "",
      caption = "Study 3: Duration of Measurement in Seconds",
      format = "html", 
      #linesep = "",
      #booktabs = TRUE,
      row.names = FALSE,
      digits = 2,
      align = c('l', rep('c', ncol(.)-1)))  %>%
  add_header_above(., c(" " = 3,"Centrality" = 4, "Dispersion" = 5, "Distribution" = 2)) %>%
  footnote(general = "'na' indicates the number of measurements for which measurement duration is unknown.") %>%
  kable_classic(full_width = FALSE, 
                lightable_options = "hover", 
                html_font = "Cambria")
Table 33: Study 3: Duration of Measurement in Seconds
Centrality
Dispersion
Distribution
characteristic n na mean mean win mean trim median sd sd win MAD min max skew kurtosis
Duration [in seconds] 3897 1068 13.83 245.1 4.61 4.22 36.79 84.41 2.1 1.28 392.5 5.64 36.39
Note:
‘na’ indicates the number of measurements for which measurement duration is unknown.
# Frequency of beeps with an outgroup (Dutch) vs. a non-outgroup interaction.
dtMedical$full %>%
  select(OutgroupInteraction, NonOutgroupInteraction) %>%
  tbl_summary(sort = list(everything() ~ "frequency"))
Characteristic N = 4,107¹
OutgroupInteraction 1,702 (43%)
Unknown 123
NonOutgroupInteraction 2,523 (61%)
Unknown 2

1 n (%)

Variable distributions

# calculate correlations and descriptives
# Multilevel correlation matrix (within/between person) for the full medical data.
# Fixed: "dtMedical$ful" only worked via $'s partial matching -- spell out "full".
medicalMlCor <-
  MlCorMat(
    data = dtMedical$full,
    id = "PID",
    selection = c("KeyNeedFulfillment", "Competence", "Autonomy", "Relatedness", "AllportsCondition", "qualityOverall", "AttitudesDutch"),
    labels = c("Core Need", "Competence", "Autonomy", "Relatedness", "Allport", "Quality", "Attitudes NL")
  ) 

# Render the multilevel correlation/descriptives matrix as an HTML table,
# grouping the (square) correlation block and the descriptive rows separately.
medicalMlCor %>%
  kable(
    .,
    caption = "Study 3: Multilevel Core Variable Descriptives",
    format = "html",
    #booktabs = TRUE,
    linesep = linesep(c(ncol(.))),
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  # the first ncol rows form the square correlation block
  pack_rows("Correlations", 1, ncol(medicalMlCor)) %>%
  pack_rows("Descriptives", ncol(medicalMlCor)+1, nrow(medicalMlCor)) %>%
  footnote(
    general = c(
      "Upper triangle: Between-person correlations;",
      "Lower triangle: Within-person correlations;",
      "*** p < .001, ** p < .01,  * p < .05"
    )
  ) %>%
  kable_classic(full_width = FALSE,  # FALSE instead of the reassignable shorthand F
                lightable_options = "hover", 
                html_font = "Cambria")
Table 34: Study 3: Multilevel Core Variable Descriptives
Core Need Competence Autonomy Relatedness Allport Quality Attitudes NL
Correlations
Core Need 0.49*** 0.58*** 0.29* 0.60*** 0.60*** 0.10
Competence 0.27*** 0.79*** 0.58*** 0.63*** 0.52*** 0.10
Autonomy 0.31*** 0.43*** 0.53*** 0.57*** 0.67*** 0.09
Relatedness 0.55*** 0.40*** 0.38*** 0.40*** 0.50*** 0.23
Allport 0.20*** 0.46*** 0.51*** 0.10*** 0.70*** 0.25*
Quality 0.39*** 0.45*** 0.44*** 0.06** -0.03 0.23*
Attitudes NL 0.51*** 0.37*** 0.55*** 0.01 0.05* 0.12***
Descriptives
Grand Mean 83.57 77.45 83.76 63.44 86.74 84.26 64.77
Between SD 8.02 11.49 9.72 13.34 7.08 10.40 14.37
Within SD 17.14 18.92 15.87 28.85 11.87 15.91 10.88
ICC(1) 0.18 0.26 0.28 0.17 0.25 0.29 0.66
ICC(2) 0.92 0.95 0.96 0.92 0.95 0.95 0.99
Note:
Upper triangle: Between-person correlations;
Lower triangle: Within-person correlations;
*** p < .001, ** p < .01, * p < .05
# calculate correlations and descriptives
# Same multilevel correlation matrix, restricted to beeps with outgroup contact.
medicalOutMlCor <-
  MlCorMat(
    data = dtMedical$full %>% filter(OutgroupInteraction == "Yes"),
    id = "PID",
    selection = c(
      "KeyNeedFulfillment", "Competence", "Autonomy", "Relatedness",
      "AllportsCondition", "qualityOverall", "AttitudesDutch"
    ),
    labels = c(
      "Core Need", "Competence", "Autonomy", "Relatedness",
      "Allport", "Quality", "Attitudes NL"
    )
  )

# Render the outgroup-contact-only multilevel correlation matrix, mirroring
# the full-sample table above.
medicalOutMlCor %>%
  kable(
    .,
    caption = "Study 3: Multilevel Core Variable Descriptives (Outgroup Contact Only)",
    format = "html",
    #booktabs = TRUE,
    linesep = linesep(c(ncol(.))),
    align = c("l", rep("c", ncol(.) - 1))
  ) %>%
  # the first ncol rows form the square correlation block
  pack_rows("Correlations", 1, ncol(medicalOutMlCor)) %>%
  pack_rows("Descriptives", ncol(medicalOutMlCor)+1, nrow(medicalOutMlCor)) %>%
  footnote(
    general = c(
      "Upper triangle: Between-person correlations;",
      "Lower triangle: Within-person correlations;",
      "*** p < .001, ** p < .01,  * p < .05"
    )
  ) %>%
  kable_classic(full_width = FALSE,  # FALSE instead of the reassignable shorthand F
                lightable_options = "hover", 
                html_font = "Cambria")
Table 35: Study 3: Multilevel Core Variable Descriptives (Outgroup Contact Only)
Core Need Competence Autonomy Relatedness Allport Quality Attitudes NL
Correlations
Core Need 0.52*** 0.57*** 0.12 0.58*** 0.63*** 0.25*
Competence 0.23*** 0.79*** 0.42*** 0.60*** 0.57*** 0.32**
Autonomy 0.26*** 0.37*** 0.41*** 0.44*** 0.61*** 0.32**
Relatedness 0.52*** 0.33*** 0.31*** 0.34** 0.40*** 0.38***
Allport 0.14*** 0.37*** 0.41*** 0.24*** 0.71*** 0.44***
Quality 0.33*** 0.36*** 0.39*** 0.20*** 0.20*** 0.48***
Attitudes NL 0.43*** 0.34*** 0.48*** 0.20*** 0.23*** 0.34***
Descriptives
Grand Mean 84.84 75.94 79.07 59.62 80.87 81.14 68.24
Between SD 9.27 12.23 12.88 19.26 10.87 12.38 13.72
Within SD 13.00 17.21 15.26 23.45 12.14 16.25 11.23
ICC(1) 0.30 0.29 0.36 0.34 0.42 0.33 0.63
ICC(2) 0.91 0.91 0.93 0.93 0.95 0.92 0.98
Note:
Upper triangle: Between-person correlations;
Lower triangle: Within-person correlations;
*** p < .001, ** p < .01, * p < .05

Contact Hypothesis

Interaction Frequency and Attitudes

To test the impact of the overall number of interactions, we hope to find a significant relationship between the number of interactions a participant had and the average outgroup attitude.

\[\begin{equation} \tag{37} r_{ContactFrequency, OutgroupAttitudes} \neq 0 \end{equation}\]

# Grand-mean-center the contact-frequency and quality aggregates (suffix "_c")
# so that intercepts and lower-order effects in the moderation model below are
# interpretable at the sample mean.
medicalContactFreq <-
  medicalContactFreq %>%
  mutate(
    SumContactNL_c = SumContactNL - mean(SumContactNL, na.rm = TRUE),
    SumContactNLAll_c = SumContactNLAll - mean(SumContactNLAll, na.rm = TRUE),
    AvAttitude_c = AvAttitude - mean(AvAttitude, na.rm = TRUE),
    AvQuality_c = AvQuality - mean(AvQuality, na.rm = TRUE)
  )

# correlation panel
# Scatterplot matrix of the contact-frequency and attitude aggregates.
# Fixed: "Numer" typo in the first axis label.
pairs.panels.new(
  medicalContactFreq %>% select(SumContactNL, SumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)",
    "Sum:\nNumber of Outgroup Contacts (NL)",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

# correlation panel with interaction sums winsorized
# Same matrix with winsorized contact sums to dampen outlier influence.
# Fixed: "Numer" typo in the first axis label.
pairs.panels.new(
  medicalContactFreq %>% select(WinSumContactNL, WinSumContactNLAll, AvQuality, AvAttitude),
  labels = c(
    "Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]",
    "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]",
    "Mean:\nInteraction Quality",
    "Mean:\nOutgroup Attitudes (NL)"
  )
)

We find that neither the number of interactions nor the number of measurement beeps with an interaction is significantly related to the average outgroup attitudes. This is to say that within our data, participants with more outgroup interactions did not have significantly more positive outgroup attitudes. However, we did find a significant correlation between the participants’ Average Interaction Quality and their Average Outgroup Attitudes.

Outgroup Attitudes by Interaction Type

In a next step we take into account that having an interaction with an outgroup member does not happen in a social vacuum. Participants who indicated that they had an interaction with an outgroup member include measurement occasions during which someone only had an interaction with an outgroup member as well as times during which a person also had interaction(s) with a non-Dutch person. Inversely, participants who indicated that they did not have an interaction with a Dutch person might either have had no interaction at all or have had an interaction with a non-Dutch person. We thus consider all possible combinations and their independent influences on outgroup attitudes.

We first assess the impact of the different interaction types across all measurement points (lumping all beeps together).

\[\begin{equation} \tag{38} Attitude = OutgroupInteraction + NonOutgroupInteraction \end{equation}\]

# between participants interaction type
# Label each beep with the combination of outgroup / non-outgroup contact
# (e.g. "Out, NoIn") and keep rows with complete predictor and outcome data.
medicalAttInteractionType <- dtMedical$full %>%
  select(
    PID,
    OutgroupInteraction,
    NonOutgroupInteraction,
    Attitude = AttitudesDutch
  ) %>%
  mutate(
    InteractionType = paste(
      ifelse(OutgroupInteraction == "Yes", "Out",
             ifelse(OutgroupInteraction == "No", "NoOut", NA)),
      ifelse(NonOutgroupInteraction == "Yes", "In",
             ifelse(NonOutgroupInteraction == "No", "NoIn", NA)),
      sep = ", "
    )
  ) %>%
  filter(!is.na(NonOutgroupInteraction), !is.na(Attitude))

# violin plot of attitudes by interaction type group
# White point = mean, white crossbar = median (cf. the boxplot note in the text).
# Fixed: fun.y (deprecated since ggplot2 3.3) -> fun; T/F -> TRUE/FALSE.
ggplot(medicalAttInteractionType, aes(y=Attitude, x=OutgroupInteraction, group = interaction(OutgroupInteraction, NonOutgroupInteraction), fill=NonOutgroupInteraction))+
  geom_violin(trim = TRUE, width=.8, position = position_dodge(0.8)) +
  geom_boxplot(width=0.04,
               position = position_dodge(.8),
               outlier.colour = NULL, 
               outlier.shape = NA, 
               notch=FALSE, fill="black", 
               lwd=1, color="black") + 
  # geom_jitter(width = 0.15,
  #            shape=18,
  #            alpha=.2, size=1) +
  stat_summary(fun=mean, 
               geom="point", 
               shape=16, 
               size=1, 
               position = position_dodge(0.8),
               colour="white") +
  #stat_summary(fun.data="mean_sdl",  
  #             fun.args = list(mult=1), 
  #             geom="pointrange", 
  #             color = "red", 
  #             width=.2)+
  stat_summary(geom = "crossbar", 
               width=0.04, 
               fatten=0, 
               position = position_dodge(0.8),
               color="white", 
               # degenerate crossbar (ymin = ymax = y) draws the median line only
               fun.data = function(x){ return(c(y=median(x), 
                                                ymin=median(x), 
                                                ymax=median(x))) })+
  ylab("Outgroup Attitudes")+
  xlab("Outgroup Interaction") +
  ggtitle("Violin Plot:\nOutgroup Attitudes by Interaction Type")+
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c(
      "Very cold or unfavorable feelings 0°",
      "Quite cold and unfavorable feelings 15°",
      "Fairly cold and unfavorable feelings 30°",
      "A bit cold and unfavorable feelings 40°",
      "No feeling at all 50°",
      "A bit warm and favorable feelings 60°",
      "Fairly warm and favorable feelings 70° ",
      "Quite warm and favorable feelings 85° ",
      "Very warm and favorable feelings 100° "
    )
  ) +
  #coord_flip()+
  scale_fill_brewer(palette = "Pastel2", name = "Non-Outgroup Interaction")+
  theme_Publication()+
  theme(strip.background =element_rect(fill="black", color="black"),
        strip.text = element_text(colour = 'white', face="bold"))#,

        #panel.border = element_rect(color="grey"),
        #legend.position="none")
# create list to store Medical models (comment previously said "Worker")
mdlMedical <- list()

# regression
# Pooled OLS on all beeps (ignores nesting within participants; the multilevel
# models below account for it): attitudes by the two interaction-type dummies
# and their product.
mdlMedical$lmAttInt <-
  lm(AttitudesDutch ~ OutgroupInteraction * NonOutgroupInteraction,
    data = dtMedical$full
  )
# summary(lmstudentAttInteraction)

# Summarize the pooled OLS model with 95% CIs; summ() mean-centers the
# continuous predictors (center = TRUE).
summ(
  mdlMedical$lmAttInt,
  center = TRUE,
  confint = TRUE,
  digits = 3
)
Observations 3907 (200 missing obs. deleted)
Dependent variable AttitudesDutch
Type OLS linear regression
F(3,3903) 45.534
0.034
Adj. R² 0.033
Est. 2.5% 97.5% t val. p
(Intercept) 62.472 61.278 63.666 102.601 0.000
OutgroupInteraction 6.807 4.963 8.652 7.236 0.000
NonOutgroupInteraction -0.970 -2.472 0.533 -1.265 0.206
OutgroupInteraction:NonOutgroupInteraction -0.449 -2.756 1.858 -0.382 0.703
Standard errors: OLS; Continuous predictors are mean-centered.

We find that while controlling for interactions with non-Dutch people, outgroup attitudes were significantly higher when participants had an interaction with a Dutch person. The effect is of a medium size (6.81 points on a 0–100 scale). However, this analysis lumps all ESM beeps from every participant together and ignores that the data are nested within participants.

Interaction Frequency and Interaction Quality

In a next step we check whether the effect of outgroup interactions depends, in part, on the quality of the interactions. Because we can only assess interaction quality when there is an interaction, it is difficult to assess this with the interaction dummy as a within person predictor. Instead, we will use an aggregate measure of interaction frequency and average interaction quality to consider the two predictors jointly.

\[\begin{equation} \tag{21} Attitude = ContactFreq \times AverageContactQual \end{equation}\]

# regression
# Between-person moderation model: average attitudes predicted by (centered)
# contact frequency, average contact quality, and their product.
mdlMedical$lmAttFreqQualX <-
  lm(AvAttitude ~ SumContactNL_c * AvQuality_c, data = medicalContactFreq)

summ(mdlMedical$lmAttFreqQualX, confint = TRUE, digits = 3)
Observations 71
Dependent variable AvAttitude
Type OLS linear regression
F(3,67) 2.151
0.088
Adj. R² 0.047
Est. 2.5% 97.5% t val. p
(Intercept) 64.831 61.503 68.159 38.884 0.000
SumContactNL_c 0.151 -0.082 0.384 1.290 0.201
AvQuality_c 0.360 0.037 0.682 2.224 0.030
SumContactNL_c:AvQuality_c 0.008 -0.016 0.032 0.684 0.497
Standard errors: OLS
# Partial eta squared for the moderation model's terms.
mdlMedical$lmAttFreqQualXEta <-
  effectsize::eta_squared(mdlMedical$lmAttFreqQualX, partial = TRUE)

# Simple-slopes plot with partial residuals for the frequency-by-quality interaction.
interactions::interact_plot(
  mdlMedical$lmAttFreqQualX,
  pred = SumContactNL_c,
  modx = AvQuality_c,
  interval = TRUE,
  partial.residuals = TRUE
)

# Johnson-Neyman: range of moderator values for which the frequency slope is
# significant (none is found here; the interaction term itself is n.s.).
interactions::johnson_neyman(mdlMedical$lmAttFreqQualX,
                             pred = SumContactNL_c,
                             modx = AvQuality_c,
                             alpha = .05)
## JOHNSON-NEYMAN INTERVAL 
## 
## The Johnson-Neyman interval could not be found. Is the p value for your interaction term below the specified alpha?

We find that in the medical sample there is only a relationship between outgroup attitudes and average perceived contact quality but not the number of outgroup contacts. Nor do we find that in this sample the impact of the number of interactions is moderated by the average contact quality.

Multilevel Regression

We, then, proceed with a multilevel analysis, which inherently acknowledges that the measurements are nested within participants.

Unconditional model

We start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model).

\[\begin{equation} \tag{39} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (null) random-intercept models for attitudes, restricted to
# beeps where both interaction-type indicators are observed, so the later
# conditional models are fit to the same rows.
mdlMedical$lmerAttNullType <-
  lme4::lmer(AttitudesDutch ~ 1 + (1 | PID),
             data = dtMedical$full %>%
               filter(complete.cases(
                 OutgroupInteraction, NonOutgroupInteraction
               ))) # use optim if it does not converge

# Parallel nlme fit (needed for the likelihood-ratio comparisons below).
mdlMedical$lmeAttNullType <-
  lme(
    AttitudesDutch ~ 1,
    random = ~ 1 | PID,
    data = dtMedical$full %>%
      filter(complete.cases(
        OutgroupInteraction, NonOutgroupInteraction
      )),
    na.action = na.omit,
    control = list(opt = "nlminb") # fixed typo: was "nlmimb"
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(mdlMedical$lmerAttNull) #or with the lme function
# Fixed-effects summary of the null model (no confidence intervals requested).
summ(mdlMedical$lmerAttNullType, digits = 3)
Observations 3907
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 29520.366
BIC 29539.178
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.665
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 64.779 1.707 37.948 70.148 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 14.300
Residual 10.140
Grouping Variables
Group # groups ICC
PID 71 0.665
# Save variances
mdlMedical$varAttNullType <- 
  VarCorr(mdlMedical$lmeAttNullType) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
mdlMedical$tauAttNullType <- 
  as.numeric(mdlMedical$varAttNullType[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
mdlMedical$sigmaAttNullType <- 
  as.numeric(mdlMedical$varAttNullType[2])
# The ICC estimate (between / (between + within)), reusing the components
# extracted above instead of re-reading the VarCorr object each time:
mdlMedical$IccAttNullType <-
  mdlMedical$tauAttNullType /
  (mdlMedical$tauAttNullType + mdlMedical$sigmaAttNullType)
# The same quantity expressed as a percentage (reported in the text below):
mdlMedical$IccPercAttNull <-
  mdlMedical$IccAttNullType * 100

We find that an estimated 66.54% of the variation in Feeling Thermometer scores is explained by between participant differences (clustering by PID). This is to say that 66.54% of the variance in any individual report of Attitudes towards the Dutch can be explained by the properties of the individual who provided the rating. And we find that including ‘participant’ as a predictor adds significantly to the model.

random intercept with predictors

To this random intercept model we now add the two types of interactions possible at each measurement point as contemporaneous predictors of outgroup attitudes. That is: We check whether within participants having an outgroup interaction (or a non-outgroup interaction) is associated with more positive outgroup attitudes at the same measurement point.

\[\begin{equation} \tag{40} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

We find that a random intercept model with the two interaction types as predictors explains significantly more variance than an empty random intercept model. Looking at the individual coefficients, we find that having an outgroup interaction is indeed associated with significantly more positive outgroup attitudes, while having an interaction with a non-Dutch person does not significantly relate to more positive or negative attitudes towards the Dutch.

TL;DR: Interaction with Dutch is great predictor, interactions with non-Dutch is not.

random slope

In a next step, we check whether further letting the effect of the different interaction types vary between participants explains additional variance in outgroup attitudes (i.e., random slopes).

\[\begin{equation} \tag{41} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}OutgroupInteraction_{ti} + \\ &\ \beta_{2i}NonOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random slopes for both interaction-type dummies; nlme fit kept for the
# likelihood-ratio comparisons below.
mdlMedical$lmeSlopesAttType <- lme(
  fixed = AttitudesDutch ~ OutgroupInteraction + NonOutgroupInteraction,
  random = ~ 1 + OutgroupInteraction + NonOutgroupInteraction | PID,
  data = dtMedical$full,
  na.action = na.omit,
  control = lmeControl(opt = "optim")
)

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
mdlMedical$lmerSlopesAttType <- lmer(
  AttitudesDutch ~
    OutgroupInteraction + NonOutgroupInteraction +
    (1 + OutgroupInteraction + NonOutgroupInteraction | PID),
  data = dtMedical$full
)
summ(
  mdlMedical$lmerSlopesAttType,
  confint = TRUE,
  digits = 3
)
Observations 3907
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 29074.080
BIC 29136.785
Pseudo-R² (fixed effects) 0.026
Pseudo-R² (total) 0.708
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 62.275 58.724 65.827 34.367 70.526 0.000
OutgroupInteractionYes 5.631 3.969 7.293 6.641 69.972 0.000
NonOutgroupInteractionYes 0.343 -0.490 1.175 0.807 54.521 0.423
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 15.012
PID OutgroupInteractionYes 6.366
PID NonOutgroupInteractionYes 1.852
Residual 9.386
Grouping Variables
Group # groups ICC
PID 71 0.719
# Wald-type 95% confidence intervals for the lme4 random-slopes fit
mdlMedical$lmerSlopesAttTypeCI <-
  confint(mdlMedical$lmerSlopesAttType, method = "Wald")

# Compare new model to previous step (likelihood-ratio tests on the nlme fits)
anova(mdlMedical$lmeAttNullType,
      mdlMedical$lmeInterceptAttType, 
      mdlMedical$lmeSlopesAttType) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # report exact p above .001, "< .001" otherwise
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # fixed: add_rownames() is deprecated; rownames_to_column() is its
  # documented replacement (same result: row names become a column)
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    # fixed caption: these are the medical-sample models; "Student" was
    # copy-pasted from the student study section
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 36: Student: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNullType 1 3 29520 29539 -14757
lmeInterceptAttType 2 5 29297 29329 -14644 1 vs 2 227.063 < .001
lmeSlopesAttType 3 10 29074 29137 -14527 2 vs 3 233.198 < .001
# Save variances
mdlMedical$varSlopesAttType <- 
  lme4::VarCorr(mdlMedical$lmeSlopesAttType)

# Assumption Checks:
# sjPlot diagnostic panels for the lme4 fit (residual and random-effect
# distribution checks), arranged in a single grid for visual inspection.
mdlMedical$diagSlopesAttType <-
  sjPlot::plot_model(mdlMedical$lmerSlopesAttType, type = "diag")
grid.arrange(
  mdlMedical$diagSlopesAttType[[1]],
  mdlMedical$diagSlopesAttType[[2]]$`PID`,
  mdlMedical$diagSlopesAttType[[3]],
  mdlMedical$diagSlopesAttType[[4]]
)

# Plot prediction model
# Population-level (fixed-effects-only) predictions overlaid on the observed
# attitude trajectories for the plotting subset of participants.
mdlMedical$predSlopesAttType <-
  dtMedical$full %>%
  filter(complete.cases(OutgroupInteraction, NonOutgroupInteraction)) %>%
  select(AttitudesDutch, TIDnum, PID) %>%
  mutate(measure = predict(
    mdlMedical$lmeSlopesAttType,
    dtMedical$full %>%
      filter(complete.cases(
        OutgroupInteraction, NonOutgroupInteraction
      )),
    # fixed: `re.form = NA` is lme4 syntax; predict.lme has no such argument
    # and silently ignores it via `...`, so predictions defaulted to the
    # innermost grouping level (including random effects). `level = 0` gives
    # the intended population-level predictions.
    level = 0
  ))

(
  mdlMedical$predPltSlopesAttType <-
    ggplot(data = mdlMedical$predSlopesAttType %>% filter(PID %in% medicalPltIDs), 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/Medical_PredictionPlot_SlopesAttType.png",
  mdlMedical$predPltSlopesAttType,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. The individual regression coefficients remain the same in their interpretation.

TL;DR: Random slopes adds significantly. Outgroup interactions still good predictors of contemporaneous outgroup attitudes.

Allport’s Conditions

We begin our main analysis by testing whether outgroup attitudes after an intergroup contact are indeed explained by whether or not Allport’s conditions were fulfilled. This should, in turn, be due to a higher perceived interaction quality. We will thus sequentially test whether the fulfillment of the core need during an interaction is (1) related to more positive outgroup attitudes, (2) higher perceived interaction quality, and (3) whether the variance explained by the core need is assumed by the perceived interaction quality if considered jointly.

Allport and Attitudes

In a first step we, thus, test the relationship between outgroup attitudes and the fulfillment of the core situational need during the interaction.

Unconditional model

We again start by creating an empty random intercept model (i.e., let the outgroup attitude intercept be different between participants; unconditional model). This is again confined to the outgroup interaction sub-sample.

\[\begin{equation} \tag{42} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# create empty list to organize models
# (Att = outgroup attitudes as outcome, Qlt = interaction quality as outcome)
mdlMedicalOut <- 
  list(
    Att = list(),
    Qlt = list()
  )

# Create and save Model
# Unconditional (empty) random-intercept model, eq. 42 — lme4 fit for summ()
mdlMedicalOut$Att$lmerNull <-
  lme4::lmer(AttitudesDutch ~ 1 + (1 | PID),
             data = medicalOutWithinBetween) # use optim if it does not converge

# Parallel nlme fit, kept for the anova() LRTs and VarCorr extraction below.
mdlMedicalOut$Att$lmeNull <-
  lme(
    AttitudesDutch ~ 1,
    random = ~ 1 | PID,
    data = medicalOutWithinBetween,
    na.action = na.omit,
    # fixed typo: "nlmimb" -> "nlminb". The misspelled value bypasses
    # lmeControl's validation, so the nlminb branch of lme's optimizer
    # selection was never taken as intended.
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.ML.r) #or with the lme function
summ(mdlMedicalOut$Att$lmerNull, digits = 3, center = TRUE)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12774.098
BIC 12790.363
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.634
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 68.342 1.643 41.593 69.170 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.470
Residual 10.240
Grouping Variables
Group # groups ICC
PID 70 0.634
# Extract and store the variance components of the null model
mdlMedicalOut$Att$varNull <-
  VarCorr(mdlMedicalOut$Att$lmeNull)
# Between-person (intercept) variance, tau_00^2
mdlMedicalOut$Att$tauNull <-
  as.numeric(mdlMedicalOut$Att$varNull[1])
# Within-person (residual) variance, sigma^2
mdlMedicalOut$Att$sigmaNull <-
  as.numeric(mdlMedicalOut$Att$varNull[2])
# ICC = between / (between + within), reusing the components saved above
mdlMedicalOut$Att$IccNull <-
  mdlMedicalOut$Att$tauNull /
  (mdlMedicalOut$Att$tauNull + mdlMedicalOut$Att$sigmaNull)
# The same ICC expressed as a percentage
mdlMedicalOut$Att$IccPercNull <-
  mdlMedicalOut$Att$IccNull * 100

random intercept with level one predictors

We then add the interaction-specific measure of how much Allport’s conditions were fulfilled to the multilevel random intercept model.

\[\begin{equation} \tag{43} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model (eq. 43): within-person-centered (cwc) Allport's
# conditions predicting outgroup attitudes; nlme fit kept for the LRTs.
mdlMedicalOut$Att$lmeInterceptAllport <-
  lme(
    AttitudesDutch ~ AllportsCondition_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# Parallel lme4 fit; the assignment inside summ() saves the model while
# printing the summary.
summ(
  mdlMedicalOut$Att$lmerInterceptAllport <-
    lmer(
      AttitudesDutch ~ AllportsCondition_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12693.826
BIC 12715.514
Pseudo-R² (fixed effects) 0.019
Pseudo-R² (total) 0.653
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.366 65.147 71.584 41.635 69.175 0.000
AllportsCondition_cwc 0.195 0.155 0.236 9.519 1601.631 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.473
Residual 9.964
Grouping Variables
Group # groups ICC
PID 70 0.646
# Wald-type 95% confidence intervals for the lme4 fit
mdlMedicalOut$Att$lmerInterceptAllportCI <- 
  confint(mdlMedicalOut$Att$lmerInterceptAllport, method = "Wald")

# Compare new model to previous step (likelihood-ratio test on the nlme fits)
anova(mdlMedicalOut$Att$lmeNull, 
      mdlMedicalOut$Att$lmeInterceptAllport) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    # fixed caption: these are the medical-sample models; "Student" was
    # copy-pasted from the student study section
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 37: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Att$lmeNull 1 3 12774 12790 -6384
mdlMedicalOut$Att$lmeInterceptAllport 2 4 12694 12716 -6343 1 vs 2 82.271 < .001
# Save variances
# NOTE(review): lme4::VarCorr is called on an nlme::lme fit here; this relies
# on generic dispatch reaching nlme's method — confirm this is intentional.
mdlMedicalOut$Att$varInterceptAllport <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptAllport)

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model. Looking at the individual coefficients, we find that the fulfillment of Allport’s conditions relates significantly to outgroup attitudes.

random slope

In a next step, we check whether further letting the effect of Allport’s conditions vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{44} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random-slopes model (eq. 44): the effect of Allport's conditions is now
# allowed to vary across participants; nlme fit kept for the LRTs.
mdlMedicalOut$Att$lmeSlopesAllport <-
  lme(
    AttitudesDutch ~
      AllportsCondition_cwc,
    random = ~ 1 + AllportsCondition_cwc | PID,
    na.action = na.omit,
    # non-default optimizer ("optim") required for convergence
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlMedicalOut$Att$lmerSlopesAllport <- lmer(
    AttitudesDutch ~
      AllportsCondition_cwc +
      (1 + AllportsCondition_cwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12651.587
BIC 12684.117
Pseudo-R² (fixed effects) 0.024
Pseudo-R² (total) 0.676
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.364 65.165 71.564 41.880 70.339 0.000
AllportsCondition_cwc 0.221 0.147 0.295 5.863 48.904 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.407
PID AllportsCondition_cwc 0.227
Residual 9.646
Grouping Variables
Group # groups ICC
PID 70 0.659
# all variables standardized within PPT
# Sensitivity check: the same random-slopes model fitted on within-person
# z-scored (zwc) outcome and predictor, yielding within-person standardized
# estimates.
summ(
  mdlMedicalOut$Att$lmerSlopesAllportZ <- lmer(
    AttitudesDutch_zwc ~
      AllportsCondition_zwc +
      (1 + AllportsCondition_zwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1671
Dependent variable AttitudesDutch_zwc
Type Mixed effects linear regression
AIC 4617.837
BIC 4650.364
Pseudo-R² (fixed effects) 0.042
Pseudo-R² (total) 0.068
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 0.001 -0.044 0.047 0.054 1608.124 0.957
AllportsCondition_zwc 0.206 0.144 0.269 6.470 56.186 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 0.000
PID AllportsCondition_zwc 0.160
Residual 0.948
Grouping Variables
Group # groups ICC
PID 69 0.000
# standardized coefficients
# stdCoef.merMod() is a project-defined helper (defined elsewhere in this
# file) returning standardized fixed-effect estimates and SEs.
stdCoef.merMod(mdlMedicalOut$Att$lmerSlopesAllport)
##                       stdcoef   stdse
## (Intercept)            0.0000 0.00000
## AllportsCondition_cwc  0.1565 0.02669
# 95%CIs
mdlMedicalOut$Att$lmerSlopesAllportCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesAllport)

# Attempts at R^2
# Several alternative pseudo-R^2 flavours for the multilevel model.
#r2mlm::r2mlm(mdlMedicalOut$Att$lmerSlopesAllport, bargraph = TRUE)
mitml::multilevelR2(mdlMedicalOut$Att$lmerSlopesAllport)
##      RB1      RB2       SB      MVP 
## 0.112416 0.009137 0.046962 0.023997
# Marginal (fixed effects) and conditional (fixed + random) R^2
performance::r2(mdlMedicalOut$Att$lmerSlopesAllport)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.676
##      Marginal R2: 0.024
# Broader set of fit indices (AIC/BIC/R^2/ICC/RMSE/Sigma) for the same model
performance::model_performance(mdlMedicalOut$Att$lmerSlopesAllport)
## # Indices of model performance
## 
## AIC       |       BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## -----------------------------------------------------------------------
## 12651.587 | 12684.117 |      0.676 |      0.024 | 0.668 | 9.341 | 9.646
# Side-by-side fit indices for the three nested lme4 fits
performance::compare_performance(mdlMedicalOut$Att$lmerNull, 
                                 mdlMedicalOut$Att$lmerInterceptAllport, 
                                 mdlMedicalOut$Att$lmerSlopesAllport)
## # Comparison of Model Performance Indices
## 
## Name    |   Model |       AIC |       BIC | R2 (cond.) | R2 (marg.) |   ICC |   RMSE |  Sigma
## ---------------------------------------------------------------------------------------------
## Model 1 | lmerMod | 12774.098 | 12790.363 |      0.634 |      0.000 | 0.634 | 10.031 | 10.239
## Model 2 | lmerMod | 12693.826 | 12715.514 |      0.653 |      0.019 | 0.646 |  9.758 |  9.964
## Model 3 | lmerMod | 12651.587 | 12684.117 |      0.676 |      0.024 | 0.668 |  9.341 |  9.646
# Compare new model to previous step (likelihood-ratio tests on the nlme fits)
anova(mdlMedicalOut$Att$lmeNull, 
      mdlMedicalOut$Att$lmeInterceptAllport, 
      mdlMedicalOut$Att$lmeSlopesAllport) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    # fixed caption: these are the medical-sample models; "Student" was
    # copy-pasted from the student study section
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 38: Student: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Att$lmeNull 1 3 12774 12790 -6384
mdlMedicalOut$Att$lmeInterceptAllport 2 4 12694 12716 -6343 1 vs 2 82.271 < .001
mdlMedicalOut$Att$lmeSlopesAllport 3 6 12652 12684 -6320 2 vs 3 46.244 < .001
# Save variances
mdlMedicalOut$Att$varSlopesAllport <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesAllport)

# Assumption Checks:
# sjPlot diagnostic panels for the lme4 fit, arranged in one grid.
mdlMedicalOut$Att$diagSlopesAllport <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesAllport, type = "diag")
grid.arrange(
  mdlMedicalOut$Att$diagSlopesAllport[[1]],
  mdlMedicalOut$Att$diagSlopesAllport[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesAllport[[3]],
  mdlMedicalOut$Att$diagSlopesAllport[[4]]
)

# Plot prediction model
# Population-level (fixed-effects-only) predictions overlaid on observed
# attitude trajectories for the plotting subset of participants.
mdlMedicalOut$Att$predSlopesAllport <- 
  medicalOutWithinBetween %>% 
  filter(PID %in% medicalOutPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesAllport,
                           medicalOutWithinBetween %>% filter(PID %in% medicalOutPltIDs),
                           # fixed: `re.form = NA` is lme4 syntax and is
                           # silently ignored by predict.lme; `level = 0`
                           # gives the intended population-level predictions
                           level = 0
                           )
         )

(
  mdlMedicalOut$Att$predPltSlopesAllport <-
    ggplot(data = mdlMedicalOut$Att$predSlopesAllport, 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAttAllport.png",
  mdlMedicalOut$Att$predPltSlopesAllport,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that Allport’s conditions remains a strong predictor (even when letting the influence vary between participants).

TL;DR: The random slope adds significantly to the prediction model.

Allport and Interaction Quality

Based on the assertion that the relationship between Allport’s conditions and outgroup attitudes is explained by a higher perceived interaction quality, Allport’s conditions should also significantly predict perceived interaction quality.

Unconditional model

Given that we now have the perceived interaction quality as our outcome variable of interest we again begin with an unconditional model (i.e., empty random intercept model), to see whether there is enough variance to explain within the participants. Similarly to before this is again done within the subsample of measurements during which an outgroup interaction was reported.

\[\begin{equation} \tag{45} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \end{split} \end{equation}\]

# Create and save Model
# Unconditional (empty) random-intercept model for interaction quality (eq. 45)
mdlMedicalOut$Qlt$lmerNull <-
  lme4::lmer(qualityOverall ~ 1 + (1 | PID), 
             data = medicalOutWithinBetween) # use optim if it does not converge

# Parallel nlme fit (used for anova() LRTs and VarCorr below).
# Fixed: the assignment target was duplicated ("x <- x <- lme(...)"), and the
# optimizer name was misspelled ("nlmimb" -> "nlminb"); the typo bypassed
# lme's nlminb optimizer branch.
mdlMedicalOut$Qlt$lmeNull <-
  lme(
    qualityOverall ~ 1,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween,
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
# summary(Null.Out.Qual.ML.r) #or with the lme function
summ(mdlMedicalOut$Qlt$lmerNull, digits = 3)
Observations 1676
Dependent variable qualityOverall
Type Mixed effects linear regression
AIC 14282.191
BIC 14298.464
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.329
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 81.359 1.453 55.988 68.217 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.430
Residual 16.300
Grouping Variables
Group # groups ICC
PID 70 0.329
# Extract and store the variance components of the quality null model
mdlMedicalOut$Qlt$varNull <-
  VarCorr(mdlMedicalOut$Qlt$lmeNull)
# Between-person (intercept) variance, tau_00^2
mdlMedicalOut$Qlt$tauNull <-
  as.numeric(mdlMedicalOut$Qlt$varNull[1])
# Within-person (residual) variance, sigma^2
mdlMedicalOut$Qlt$sigmaNull <-
  as.numeric(mdlMedicalOut$Qlt$varNull[2])
# ICC = between / (between + within), reusing the components saved above
mdlMedicalOut$Qlt$IccNull <-
  mdlMedicalOut$Qlt$tauNull /
  (mdlMedicalOut$Qlt$tauNull + mdlMedicalOut$Qlt$sigmaNull)
# The same ICC expressed as a percentage
mdlMedicalOut$Qlt$IccPercNull <-
  mdlMedicalOut$Qlt$IccNull * 100

random intercept with level one predictors

We then add the fulfillment of Allport’s conditions to the multilevel random intercept model.

\[\begin{equation} \tag{46} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model (eq. 46): within-person-centered Allport's conditions
# predicting perceived interaction quality; nlme fit kept for the LRTs.
mdlMedicalOut$Qlt$lmeInterceptAllport <-
  lme(
    qualityOverall ~ AllportsCondition_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
summ(
  mdlMedicalOut$Qlt$lmerInterceptAllport <-
    lmer(
      qualityOverall ~ AllportsCondition_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1676
Dependent variable qualityOverall
Type Mixed effects linear regression
AIC 13861.637
BIC 13883.333
Pseudo-R² (fixed effects) 0.150
Pseudo-R² (total) 0.487
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 81.421 78.580 84.263 56.167 68.066 0.000
AllportsCondition_cwc 0.651 0.593 0.708 22.130 1605.645 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.560
Residual 14.280
Grouping Variables
Group # groups ICC
PID 70 0.396
# Wald-type 95% confidence intervals for the random-intercept quality model
mdlMedicalOut$Qlt$lmerInterceptAllportCI <-
  confint(mdlMedicalOut$Qlt$lmerInterceptAllport, method = "Wald")

# Compare new model to previous step (likelihood-ratio test on the nlme fits)
tblQltInterceptComparison <-
  anova(mdlMedicalOut$Qlt$lmeNull,
        mdlMedicalOut$Qlt$lmeInterceptAllport) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value` >= .001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "")

# Render the comparison as a left-aligned HTML table
tblQltInterceptComparison %>%
  kbl(
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,
    align = rep("c", ncol(tblQltInterceptComparison)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 39: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Qlt$lmeNull 1 3 14282 14298 -7138
mdlMedicalOut$Qlt$lmeInterceptAllport 2 4 13862 13883 -6927 1 vs 2 422.554 < .001
# Save variances
# Variance components of the random-intercept quality model
mdlMedicalOut$Qlt$varInterceptAllport <-
  lme4::VarCorr(mdlMedicalOut$Qlt$lmeInterceptAllport)

We find that the model with the added predictor indeed explains more variance in perceived interaction quality than the unconditional model. Looking at the individual coefficients, we find that the fulfillment of Allport’s conditions relates significantly to perceived interaction quality.

random slope

In a next step, we check whether further letting the effect of Allport’s conditions vary between participants explains additional variance in perceived interaction quality (i.e., random slope).

\[\begin{equation} \tag{47} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random-slopes model (eq. 47): the effect of Allport's conditions on
# perceived interaction quality varies between participants.
mdlMedicalOut$Qlt$lmeSlopesAllport <-
  lme(
    qualityOverall ~
      AllportsCondition_cwc,
    random = ~ 1 + AllportsCondition_cwc | PID,
    na.action = na.omit,
    # non-default optimizer ("optim") required for convergence
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlMedicalOut$Qlt$lmerSlopesAllport <-
    lmer(
      qualityOverall ~
        AllportsCondition_cwc +
        (1 + AllportsCondition_cwc | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1676
Dependent variable qualityOverall
Type Mixed effects linear regression
AIC 13805.051
BIC 13837.596
Pseudo-R² (fixed effects) 0.149
Pseudo-R² (total) 0.526
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 81.415 78.572 84.258 56.124 68.031 0.000
AllportsCondition_cwc 0.650 0.542 0.758 11.822 55.279 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.602
PID AllportsCondition_cwc 0.334
Residual 13.743
Grouping Variables
Group # groups ICC
PID 70 0.416
# Wald-type 95% confidence intervals for the random-slopes quality model
mdlMedicalOut$Qlt$lmerSlopesAllportCI <- 
  confint(method = "Wald", mdlMedicalOut$Qlt$lmerSlopesAllport)

# Compare new model to previous step (likelihood-ratio tests on the nlme fits)
anova(mdlMedicalOut$Qlt$lmeNull, 
      mdlMedicalOut$Qlt$lmeInterceptAllport, 
      mdlMedicalOut$Qlt$lmeSlopesAllport) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 40: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Qlt$lmeNull 1 3 14282 14298 -7138
mdlMedicalOut$Qlt$lmeInterceptAllport 2 4 13862 13883 -6927 1 vs 2 422.554 < .001
mdlMedicalOut$Qlt$lmeSlopesAllport 3 6 13805 13838 -6897 2 vs 3 60.586 < .001
# Save variances
mdlMedicalOut$Qlt$varSlopesAllport <- 
  lme4::VarCorr(mdlMedicalOut$Qlt$lmeSlopesAllport)

# Assumption Checks:
# sjPlot diagnostic panels for the lme4 fit, arranged in one grid.
mdlMedicalOut$Qlt$diagSlopesAllport <-
  sjPlot::plot_model(mdlMedicalOut$Qlt$lmerSlopesAllport, type = "diag")
grid.arrange(
  mdlMedicalOut$Qlt$diagSlopesAllport[[1]],
  mdlMedicalOut$Qlt$diagSlopesAllport[[2]]$`PID`,
  mdlMedicalOut$Qlt$diagSlopesAllport[[3]],
  mdlMedicalOut$Qlt$diagSlopesAllport[[4]]
)

# Plot prediction model
# Fixed two copy-paste slips from the attitudes section: the observed series
# overlaid on the predictions must be the model's DV, qualityOverall (not
# AttitudesDutch), and the y-axis label now names interaction quality.
mdlMedicalOut$Qlt$predSlopesAllport <- 
  medicalOutWithinBetween %>%
  filter(PID %in% medicalOutPltIDs) %>%
  select(qualityOverall, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Qlt$lmeSlopesAllport,
                           medicalOutWithinBetween %>% filter(PID %in% medicalOutPltIDs),
                           # fixed: `re.form = NA` is lme4 syntax and is
                           # silently ignored by predict.lme; `level = 0`
                           # gives the intended population-level predictions
                           level = 0
                           )
         )

(
  mdlMedicalOut$Qlt$predPltSlopesAllport <-
    ggplot(data = mdlMedicalOut$Qlt$predSlopesAllport %>% filter(PID %in% medicalOutPltIDs), 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = qualityOverall), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Perceived Interaction Quality") +
    theme_Publication()
)

ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAllport.png",
  mdlMedicalOut$Qlt$predPltSlopesAllport,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that Allport’s conditions remains a strong predictor (even when letting the influence vary between participants).

TL;DR: The random slope adds significantly to the prediction model.

Allport, Quality, and Attitudes

In our final main step, we will jointly consider the effect of Allport’s conditions and perceived interaction quality on outgroup attitudes. We expect that if the relationship between Allport’s conditions and outgroup attitudes is indeed explained by a higher perceived interaction quality, the interaction quality perception should assume the explained variance by the fulfillment of Allport’s conditions.

random intercept with level one predictors

We thus add both Allport’s conditions and perceived interaction quality to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{48} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Create and save Model
# Joint random-intercept model (eq. 48): Allport's conditions and perceived
# interaction quality (both within-person centered) predicting attitudes.
mdlMedicalOut$Att$lmeInterceptAllportQlt <-
  lme(
    AttitudesDutch ~ AllportsCondition_cwc + qualityOverall_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
summ(
  mdlMedicalOut$Att$lmerInterceptAllportQlt <-
    lmer(
      AttitudesDutch ~ AllportsCondition_cwc + qualityOverall_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12586.548
BIC 12613.657
Pseudo-R² (fixed effects) 0.042
Pseudo-R² (total) 0.677
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.344 65.126 71.562 41.625 69.154 0.000
AllportsCondition_cwc 0.076 0.031 0.120 3.348 1600.598 0.001
qualityOverall_cwc 0.184 0.151 0.217 10.946 1600.504 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.489
Residual 9.614
Grouping Variables
Group # groups ICC
PID 70 0.663
# Wald-type 95% confidence intervals for the joint random-intercept model
mdlMedicalOut$Att$lmerInterceptAllportQltCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerInterceptAllportQlt)

# Compare new model to previous step (likelihood-ratio test on the nlme fits)
anova(
  mdlMedicalOut$Att$lmeNull, 
  mdlMedicalOut$Att$lmeInterceptAllportQlt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 41: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Att$lmeNull 1 3 12774 12790 -6384
mdlMedicalOut$Att$lmeInterceptAllportQlt 2 5 12587 12614 -6288 1 vs 2 191.549 < .001
# Save variances
# Variance components of the joint random-intercept model
mdlMedicalOut$Att$varInterceptAllportQlt <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptAllportQlt)

Unsurprisingly, the model with the two predictors adds significantly beyond the empty unconditional model. However, more importantly, looking at the coefficients, we find that the effect of Allport’s conditions is indeed strongly reduced and the variance is explained by the perceived interaction quality. Notably, in this sample the reduction of variance explained is not as much as we saw with core need fulfillment in the previous studies.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{49} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}Allport_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Joint random-slopes model (eq. 49): intercept and both predictor effects
# vary by participant; nlme fit kept for the LRTs.
mdlMedicalOut$Att$lmeSlopesAllportQlt <-
  lme(
    AttitudesDutch ~
      AllportsCondition_cwc + qualityOverall_cwc,
    random = ~ 1 + AllportsCondition_cwc + qualityOverall_cwc | PID,
    na.action = na.omit,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlMedicalOut$Att$lmerSlopesAllportQlt <- lmer(
    AttitudesDutch ~
      AllportsCondition_cwc + qualityOverall_cwc +
      (1 + AllportsCondition_cwc + qualityOverall_cwc | PID),
    data = medicalOutWithinBetween,
    # bobyqa with a raised evaluation cap needed for convergence of the
    # three-dimensional random-effects structure
    control = lmerControl(optimizer="bobyqa", optCtrl=list(maxfun=2e5))
  ),
  confint = TRUE,
  digits = 3
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12519.205
BIC 12573.423
Pseudo-R² (fixed effects) 0.040
Pseudo-R² (total) 0.709
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.341 65.121 71.561 41.597 69.146 0.000
AllportsCondition_cwc 0.088 0.029 0.148 2.904 37.088 0.006
qualityOverall_cwc 0.174 0.120 0.228 6.299 49.858 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.520
PID AllportsCondition_cwc 0.138
PID qualityOverall_cwc 0.157
Residual 9.155
Grouping Variables
Group # groups ICC
PID 70 0.686
# Wald-type 95% confidence intervals for the joint random-slopes model
mdlMedicalOut$Att$lmerSlopesAllportQltCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesAllportQlt)

# Compare new model to previous step (likelihood-ratio tests on the nlme fits)
anova(
  mdlMedicalOut$Att$lmeNull,
  mdlMedicalOut$Att$lmeInterceptAllportQlt,
  mdlMedicalOut$Att$lmeSlopesAllportQlt
) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 42: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Att$lmeNull 1 3 12774 12790 -6384
mdlMedicalOut$Att$lmeInterceptAllportQlt 2 5 12587 12614 -6288 1 vs 2 191.549 < .001
mdlMedicalOut$Att$lmeSlopesAllportQlt 3 10 12519 12573 -6250 2 vs 3 77.306 < .001
# Save variances
mdlMedicalOut$Att$varSlopesAllportQlt <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesAllportQlt)

# Assumption Checks:
# sjPlot diagnostic panels for the lme4 fit, arranged in one grid.
mdlMedicalOut$Att$diagSlopesAllportQlt <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesAllportQlt, type = "diag")
grid.arrange(
  mdlMedicalOut$Att$diagSlopesAllportQlt[[1]],
  mdlMedicalOut$Att$diagSlopesAllportQlt[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesAllportQlt[[3]],
  mdlMedicalOut$Att$diagSlopesAllportQlt[[4]]
)

# Plot prediction model
mdlMedicalOut$Att$predSlopesAllportQlt <- 
  medicalOutWithinBetween %>% 
  filter(complete.cases(AllportsCondition, qualityOverall)) %>%
  filter(PID %in% medicalOutPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesAllportQlt,
                           medicalOutWithinBetween %>% 
                             filter(complete.cases(AllportsCondition, qualityOverall)) %>%
                             filter(PID %in% medicalOutPltIDs),
                           re.form = NA
                           )
         )

(
  mdlMedicalOut$Att$predPltSlopesAllportQlt <-
    ggplot(data = mdlMedicalOut$Att$predSlopesAllportQlt, aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAttAllportQlt.png",
  mdlMedicalOut$Att$predPltSlopesAllportQlt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

When we consider the influences of Allport’s Conditions and interaction quality on outgroup attitudes jointly, we find that perceived interaction quality is a substantially stronger predictor and the unique variance explained by Allport’s Conditions was less than half of its original effect size.

Need Fulfillment

The main focus of our proposal is again the assessment of how much positive outgroup attitudes might be explained by whether or not an intergroup interaction fulfilled the person’s core situational need. As with the previous two studies we also check to what extent this effect goes through perceived interaction quality.

Need fulfillment and Attitudes

In a first step we, again, test the relationship between outgroup attitudes and the fulfillment of the core situational need during the interaction.

random intercept with level one predictors

We jump right in with the random intercept model because we have already assessed the unconditional model when we tested the influence of Allport’s conditions on outgroup attitudes. We thus add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{50} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Create and save Model
# nlme fit: random intercept per person (PID) with person-mean-centred
# ("_cwc") key-need fulfillment as level-1 predictor; this fit is used for
# the likelihood-ratio model comparisons below.
mdlMedicalOut$Att$lmeInterceptCore <-
  lme(
    AttitudesDutch ~ KeyNeedFulfillment_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# Parallel lme4 fit of the same model (jtools::summ needs a merMod object);
# the model is stored as a side effect of the inline assignment.
summ(
  mdlMedicalOut$Att$lmerInterceptCore <-
    lmer(
      AttitudesDutch ~ KeyNeedFulfillment_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12690.495
BIC 12712.182
Pseudo-R² (fixed effects) 0.020
Pseudo-R² (total) 0.654
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.335 65.116 71.555 41.601 69.160 0.000
KeyNeedFulfillment_cwc 0.195 0.156 0.234 9.705 1601.524 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.479
Residual 9.953
Grouping Variables
Group # groups ICC
PID 70 0.647
# 95% Wald confidence intervals for the random-intercept lmer model
mdlMedicalOut$Att$lmerInterceptCoreCI <-
  confint(method = "Wald", mdlMedicalOut$Att$lmerInterceptCore)

# Compare new model to previous step (likelihood-ratio test)
# FIX: the caption said "Student" but this chunk reports the medical
# sample — made consistent with the other captions of this section.
anova(mdlMedicalOut$Att$lmeNull,
      mdlMedicalOut$Att$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 43: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptCore 2 4 12690 12712 -6341 1 vs 2 85.602 < .001
# Save variances
# Store the variance components of the random-intercept fit for reporting.
mdlMedicalOut$Att$varInterceptCore <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptCore)

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model. Looking at the individual coefficients, we find that the situational core need relates significantly to outgroup attitudes.

random slope

In a next step, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{51} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random-slopes extension: the effect of key-need fulfillment may now vary
# between persons; "optim" replaces the default optimizer for convergence.
mdlMedicalOut$Att$lmeSlopesCore <-
  lme(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc,
    random = ~ 1 + KeyNeedFulfillment_cwc | PID,
    na.action = na.omit,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
# Parallel lme4 fit for jtools::summ and the sjPlot diagnostics below.
summ(
  mdlMedicalOut$Att$lmerSlopesCore <- lmer(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc +
      (1 + KeyNeedFulfillment_cwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12632.018
BIC 12664.549
Pseudo-R² (fixed effects) 0.019
Pseudo-R² (total) 0.680
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.318 65.096 71.539 41.568 69.134 0.000
KeyNeedFulfillment_cwc 0.194 0.122 0.266 5.294 56.076 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.505
PID KeyNeedFulfillment_cwc 0.223
Residual 9.584
Grouping Variables
Group # groups ICC
PID 70 0.665
# all variables standardized within PPT
# Same random-slopes model on the within-person standardized ("_zwc")
# variables to obtain standardized estimates.
summ(
  mdlMedicalOut$Att$lmerSlopesCoreZ <- lmer(
    AttitudesDutch_zwc ~
      KeyNeedFulfillment_zwc +
      (1 + KeyNeedFulfillment_zwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1634
Dependent variable AttitudesDutch_zwc
Type Mixed effects linear regression
AIC 4530.291
BIC 4562.683
Pseudo-R² (fixed effects) 0.031
Pseudo-R² (total) 0.061
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) -0.000 -0.046 0.046 -0.013 1570.253 0.989
KeyNeedFulfillment_zwc 0.176 0.110 0.241 5.261 55.669 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 0.000
PID KeyNeedFulfillment_zwc 0.174
Residual 0.951
Grouping Variables
Group # groups ICC
PID 67 0.000
# standardized coefficients
# Alternative standardization via helper defined earlier in this document.
stdCoef.merMod(mdlMedicalOut$Att$lmerSlopesCore)
##                        stdcoef   stdse
## (Intercept)             0.0000 0.00000
## KeyNeedFulfillment_cwc  0.1406 0.02656
# 95%CIs
mdlMedicalOut$Att$lmerSlopesCoreCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesCore)

# Attempts at R^2
# Variance decomposition (fixed / slope variation / mean variation / sigma2).
r2mlm::r2mlm(mdlMedicalOut$Att$lmerSlopesCore, bargraph = TRUE)

## $Decompositions
##                   total
## fixed           0.01937
## slope variation 0.02557
## mean variation  0.63517
## sigma2          0.31989
## 
## $R2s
##       total
## f   0.01937
## v   0.02557
## m   0.63517
## fv  0.04494
## fvm 0.68011
# Cross-check pseudo-R² with several implementations (they use different
# estimators, so values are not expected to match exactly).
mitml::multilevelR2(mdlMedicalOut$Att$lmerSlopesCore)
##      RB1      RB2       SB      MVP 
##  0.12390 -0.00538  0.04197  0.01937
# Conditional (fixed + random) and marginal (fixed only) R².
performance::r2(mdlMedicalOut$Att$lmerSlopesCore)
## # R2 for Mixed Models
## 
##   Conditional R2: 0.680
##      Marginal R2: 0.019
# Full fit-index table for the random-slopes model.
performance::model_performance(mdlMedicalOut$Att$lmerSlopesCore)
## # Indices of model performance
## 
## AIC       |       BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## -----------------------------------------------------------------------
## 12632.018 | 12664.549 |      0.680 |      0.019 | 0.674 | 9.282 | 9.583
# Side-by-side comparison across the model-building steps.
performance::compare_performance(mdlMedicalOut$Att$lmerNull, 
                                 mdlMedicalOut$Att$lmerInterceptCore, 
                                 mdlMedicalOut$Att$lmerSlopesCore)
## # Comparison of Model Performance Indices
## 
## Name    |   Model |       AIC |       BIC | R2 (cond.) | R2 (marg.) |   ICC |   RMSE |  Sigma
## ---------------------------------------------------------------------------------------------
## Model 1 | lmerMod | 12774.098 | 12790.363 |      0.634 |      0.000 | 0.634 | 10.031 | 10.239
## Model 2 | lmerMod | 12690.495 | 12712.182 |      0.654 |      0.020 | 0.647 |  9.748 |  9.953
## Model 3 | lmerMod | 12632.018 | 12664.549 |      0.680 |      0.019 | 0.674 |  9.282 |  9.583
# Compare new model to previous step (likelihood-ratio tests)
# FIX: the caption said "Student" but this chunk reports the medical
# sample — made consistent with the other captions of this section.
anova(mdlMedicalOut$Att$lmeNull, 
      mdlMedicalOut$Att$lmeInterceptCore, 
      mdlMedicalOut$Att$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 44: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptCore 2 4 12690 12712 -6341 1 vs 2 85.602 < .001
mdlMedicalOut\(Att\)lmeSlopesCore 3 6 12632 12665 -6310 2 vs 3 62.477 < .001
# Save variances
# Store the random-effects SD/correlation estimates of the nlme fit.
mdlMedicalOut$Att$varSlopesCore <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesCore)

# Assumption Checks:
# sjPlot diagnostics on the parallel lmer fit: residual QQ, random-effect
# QQ (PID), residual distribution, fitted vs. residuals.
mdlMedicalOut$Att$diagSlopesCore <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesCore, type = "diag")
grid.arrange(
  mdlMedicalOut$Att$diagSlopesCore[[1]],
  mdlMedicalOut$Att$diagSlopesCore[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesCore[[3]],
  mdlMedicalOut$Att$diagSlopesCore[[4]]
)

# Plot prediction model
# FIX: predict.lme() has no `re.form` argument (lme4 syntax); the original
# `re.form = NA` was silently swallowed by `...`, yielding subject-level
# predictions instead of the intended population-level ones. `level = 0`
# gives fixed-effects-only predictions.
mdlMedicalOut$Att$predSlopesCore <- 
  medicalOutWithinBetween %>% 
  filter(complete.cases(KeyNeedFulfillment)) %>%
  filter(PID %in% medicalOutPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesCore,
                           medicalOutWithinBetween %>% 
                             filter(complete.cases(KeyNeedFulfillment)) %>% 
                             filter(PID %in% medicalOutPltIDs),
                           level = 0
                           )
         )

# Observed (black) vs. predicted (blue) attitude trajectories per person
(
  mdlMedicalOut$Att$predPltSlopesCore <-
    ggplot(data = mdlMedicalOut$Att$predSlopesCore, 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the faceted prediction plot
ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAttCore.png",
  mdlMedicalOut$Att$predPltSlopesCore,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model. We also find that the core need remains a strong predictor (even when letting the influence vary between participants).

TL;DR: The random slope adds significantly to the prediction model.

Need fulfillment and Interaction Quality

Based on the assertion that the relationship between core need fulfillment and outgroup attitudes is explained by a higher perceived interaction quality, the core need fulfillment should also significantly predict perceived interaction quality.

random intercept with level one predictor

We again do not need to calculate an unconditional model for the perceived interaction quality outcome variable because we have already done so for the previous assessment of Allport’s conditions. So we add the core interaction need fulfillment to the multilevel random intercept model.

\[\begin{equation} \tag{52} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \end{split} \end{equation}\]

# Create and save Model
# Same random-intercept specification as before, but now predicting
# perceived interaction quality instead of outgroup attitudes.
mdlMedicalOut$Qlt$lmeInterceptCore <-
  lme(
    qualityOverall ~ KeyNeedFulfillment_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# Parallel lme4 fit for jtools::summ; stored via the inline assignment.
summ(
  mdlMedicalOut$Qlt$lmerInterceptCore <-
    lmer(
      qualityOverall ~ KeyNeedFulfillment_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1676
Dependent variable qualityOverall
Type Mixed effects linear regression
AIC 14098.960
BIC 14120.657
Pseudo-R² (fixed effects) 0.072
Pseudo-R² (total) 0.405
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 81.353 78.503 84.203 55.954 68.211 0.000
KeyNeedFulfillment_cwc 0.440 0.379 0.501 14.211 1605.886 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.510
Residual 15.370
Grouping Variables
Group # groups ICC
PID 70 0.359
# 95% Wald confidence intervals for the random-intercept lmer model
mdlMedicalOut$Qlt$lmerInterceptCoreCI <- 
  confint(method = "Wald", mdlMedicalOut$Qlt$lmerInterceptCore)

# Compare new model to previous step (likelihood-ratio test),
# formatted as an HTML table for the report.
anova(mdlMedicalOut$Qlt$lmeNull, 
      mdlMedicalOut$Qlt$lmeInterceptCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 45: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Qlt\)lmeNull 1 3 14282 14298 -7138
mdlMedicalOut\(Qlt\)lmeInterceptCore 2 4 14099 14121 -7045 1 vs 2 185.231 < .001
# Save variances
# Store the variance components of the random-intercept fit for reporting.
mdlMedicalOut$Qlt$varInterceptCore <-
  lme4::VarCorr(mdlMedicalOut$Qlt$lmeInterceptCore)

The predictor again adds a significant amount of explained variances beyond the empty model and looking at the slope coefficient, we find that the situational core need fulfillment relates significantly to perceived interaction quality.

random slope

As before, we check whether further letting the effect of core need fulfillment vary between participants explains additional variance in perceived interaction quality (i.e., random slope).

\[\begin{equation} \tag{53} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ InteractionQuality_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random-slopes model for interaction quality: the key-need effect may vary
# between persons; "optim" replaces the default optimizer for convergence.
mdlMedicalOut$Qlt$lmeSlopesCore <-
  lme(
    qualityOverall ~
      KeyNeedFulfillment_cwc,
    random = ~ 1 + KeyNeedFulfillment_cwc | PID,
    na.action = na.omit,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
# Parallel lme4 fit for jtools::summ and the diagnostics below.
summ(
  mdlMedicalOut$Qlt$lmerSlopesCore <-
    lmer(
      qualityOverall ~
        KeyNeedFulfillment_cwc +
        (1 + KeyNeedFulfillment_cwc | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1676
Dependent variable qualityOverall
Type Mixed effects linear regression
AIC 14054.179
BIC 14086.724
Pseudo-R² (fixed effects) 0.070
Pseudo-R² (total) 0.436
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 81.356 78.526 84.186 56.337 69.644 0.000
KeyNeedFulfillment_cwc 0.433 0.332 0.535 8.357 57.253 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.460
PID KeyNeedFulfillment_cwc 0.297
Residual 14.917
Grouping Variables
Group # groups ICC
PID 70 0.371
# 95% Wald confidence intervals for the random-slopes lmer model
mdlMedicalOut$Qlt$lmerSlopesCoreCI <- 
  confint(method = "Wald", mdlMedicalOut$Qlt$lmerSlopesCore)

# Compare new model to previous step (likelihood-ratio tests),
# formatted as an HTML table for the report.
anova(mdlMedicalOut$Qlt$lmeNull, 
      mdlMedicalOut$Qlt$lmeInterceptCore, 
      mdlMedicalOut$Qlt$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 46: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Qlt\)lmeNull 1 3 14282 14298 -7138
mdlMedicalOut\(Qlt\)lmeInterceptCore 2 4 14099 14121 -7045 1 vs 2 185.231 < .001
mdlMedicalOut\(Qlt\)lmeSlopesCore 3 6 14054 14087 -7021 2 vs 3 48.789 < .001
# Save variances
# Store the random-effects SD/correlation estimates of the nlme fit.
mdlMedicalOut$Qlt$varSlopesCore <- 
  lme4::VarCorr(mdlMedicalOut$Qlt$lmeSlopesCore)

# Assumption Checks:
# sjPlot diagnostics on the parallel lmer fit: residual QQ, random-effect
# QQ (PID), residual distribution, fitted vs. residuals.
mdlMedicalOut$Qlt$diagSlopesCore <-
  sjPlot::plot_model(mdlMedicalOut$Qlt$lmerSlopesCore, type = "diag")
grid.arrange(
  mdlMedicalOut$Qlt$diagSlopesCore[[1]],
  mdlMedicalOut$Qlt$diagSlopesCore[[2]]$`PID`,
  mdlMedicalOut$Qlt$diagSlopesCore[[3]],
  mdlMedicalOut$Qlt$diagSlopesCore[[4]]
)

# Plot prediction model
# FIX: this model predicts perceived interaction quality (qualityOverall),
# but the original chunk selected/plotted AttitudesDutch and labelled the
# y-axis "Outgroup Attitudes" (copy-paste from the attitude models above).
# FIX: predict.lme() ignores `re.form` (lme4 syntax); `level = 0` yields the
# intended fixed-effects-only predictions.
# Also dropped the redundant second `filter(PID %in% medicalOutPltIDs)` in
# the ggplot call — the data are already restricted to the plotting IDs.
mdlMedicalOut$Qlt$predSlopesCore <- 
  medicalOutWithinBetween %>%
  filter(complete.cases(KeyNeedFulfillment)) %>% 
  filter(PID %in% medicalOutPltIDs) %>%
  select(qualityOverall, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Qlt$lmeSlopesCore,
                           medicalOutWithinBetween %>% 
                             filter(complete.cases(KeyNeedFulfillment)) %>% 
                             filter(PID %in% medicalOutPltIDs),
                           level = 0
                           )
         )

# Observed (black) vs. predicted (blue) quality trajectories per person
(
  mdlMedicalOut$Qlt$predPltSlopesCore <-
    ggplot(data = mdlMedicalOut$Qlt$predSlopesCore, 
           aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = qualityOverall), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Perceived Interaction Quality") +
    theme_Publication()
)

# Export the faceted prediction plot
ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesCore.png",
  mdlMedicalOut$Qlt$predPltSlopesCore,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes does add significantly beyond the random intercept model and situational key need fulfillment remains a meaningful predictor of perceived interaction quality.

Interaction Needs, Quality, and Attitudes

In our final main step, we will jointly consider the effect of core need fulfillment and perceived interaction quality on outgroup attitudes. We expect that if the relationship between core need fulfillment and outgroup attitudes is indeed explained by a higher perceived interaction quality, the interaction quality perception should assume the explained variance of the core contact need fulfillment.

random intercept with level one predictors

We thus add both the core need fulfillment and perceived interaction quality to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{54} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Create and save Model
# Random-intercept model with both level-1 predictors (need fulfillment and
# perceived interaction quality, both person-mean centred).
mdlMedicalOut$Att$lmeInterceptCoreQlt <-
  lme(
    AttitudesDutch ~ KeyNeedFulfillment_cwc + qualityOverall_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# CONSISTENCY: `center = FALSE` added to match the other summ() calls in
# this section (FALSE is the jtools default, so the output is unchanged).
summ(
  mdlMedicalOut$Att$lmerInterceptCoreQlt <-
    lmer(
      AttitudesDutch ~ KeyNeedFulfillment_cwc + qualityOverall_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12567.111
BIC 12594.220
Pseudo-R² (fixed effects) 0.046
Pseudo-R² (total) 0.681
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.331 65.112 71.549 41.613 69.146 0.000
KeyNeedFulfillment_cwc 0.114 0.074 0.154 5.576 1600.467 0.000
qualityOverall_cwc 0.182 0.152 0.213 11.720 1600.460 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.494
Residual 9.555
Grouping Variables
Group # groups ICC
PID 70 0.666
# 95%CI
# Wald confidence intervals for the random-intercept lmer model
mdlMedicalOut$Att$lmerInterceptCoreQltCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerInterceptCoreQlt)

# Compare new model to previous step (likelihood-ratio test),
# formatted as an HTML table for the report.
anova(
  mdlMedicalOut$Att$lmeNull, 
  mdlMedicalOut$Att$lmeInterceptCoreQlt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 47: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptCoreQlt 2 5 12567 12594 -6279 1 vs 2 210.987 < .001
# Save variances
# Store the variance components of the random-intercept fit for reporting.
mdlMedicalOut$Att$varInterceptCoreQlt <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptCoreQlt)

Unsurprisingly, the model with the two predictors adds significantly beyond the empty unconditional model. However, more importantly, looking at the coefficients, we find that the effect of core need fulfillment is indeed strongly reduced and the variance is explained by the perceived interaction quality. However, similar to the equivalent analysis of Allport’s conditions and interaction quality, the effect of need fulfillment remains a significant predictor of outgroup attitudes when controlling for perceived interaction quality.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{55} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}InteractionQuality_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random slopes for both level-1 predictors; "optim" replaces the default
# nlme optimizer for convergence.
mdlMedicalOut$Att$lmeSlopesCoreQlt <-
  lme(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc + qualityOverall_cwc,
    random = ~ 1 + KeyNeedFulfillment_cwc + qualityOverall_cwc | PID,
    na.action = na.omit,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
# bobyqa + raised iteration cap needed for lme4 convergence here.
# CONSISTENCY: `center = FALSE` added to match the other summ() calls in
# this section (FALSE is the jtools default, so the output is unchanged).
summ(
  mdlMedicalOut$Att$lmerSlopesCoreQlt <- lmer(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc + qualityOverall_cwc +
      (1 + KeyNeedFulfillment_cwc + qualityOverall_cwc | PID),
    data = medicalOutWithinBetween,
    control = lmerControl(optimizer="bobyqa", optCtrl=list(maxfun=2e5))
  ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12494.269
BIC 12548.487
Pseudo-R² (fixed effects) 0.041
Pseudo-R² (total) 0.713
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.317 65.095 71.538 41.561 69.118 0.000
KeyNeedFulfillment_cwc 0.110 0.049 0.172 3.507 37.670 0.001
qualityOverall_cwc 0.171 0.122 0.220 6.877 46.473 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.532
PID KeyNeedFulfillment_cwc 0.168
PID qualityOverall_cwc 0.136
Residual 9.064
Grouping Variables
Group # groups ICC
PID 70 0.690
# 95% Wald confidence intervals for the random-slopes lmer model
mdlMedicalOut$Att$lmerSlopesCoreQltCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesCoreQlt)

# Compare new model to previous step (likelihood-ratio tests),
# formatted as an HTML table for the report.
anova(
  mdlMedicalOut$Att$lmeNull,
  mdlMedicalOut$Att$lmeInterceptCoreQlt,
  mdlMedicalOut$Att$lmeSlopesCoreQlt
) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 48: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptCoreQlt 2 5 12567 12594 -6279 1 vs 2 210.987 < .001
mdlMedicalOut\(Att\)lmeSlopesCoreQlt 3 10 12494 12548 -6237 2 vs 3 82.833 < .001
# Save variances
# Store the random-effects SD/correlation estimates of the nlme fit.
mdlMedicalOut$Att$varSlopesCoreQlt <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesCoreQlt)

# Assumption Checks:
# sjPlot diagnostics on the parallel lmer fit: residual QQ, random-effect
# QQ (PID), residual distribution, fitted vs. residuals.
mdlMedicalOut$Att$diagSlopesCoreQlt <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesCoreQlt, type = "diag")
grid.arrange(
  mdlMedicalOut$Att$diagSlopesCoreQlt[[1]],
  mdlMedicalOut$Att$diagSlopesCoreQlt[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesCoreQlt[[3]],
  mdlMedicalOut$Att$diagSlopesCoreQlt[[4]]
)

# Plot prediction model
# FIX: predict.lme() has no `re.form` argument (lme4 syntax); the original
# `re.form = NA` was silently swallowed by `...`, yielding subject-level
# predictions instead of the intended population-level ones. `level = 0`
# gives fixed-effects-only predictions.
mdlMedicalOut$Att$predSlopesCoreQlt <- 
  medicalOutWithinBetween %>% 
  filter(complete.cases(KeyNeedFulfillment, qualityOverall)) %>%
  filter(PID %in% medicalOutPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesCoreQlt,
                           medicalOutWithinBetween %>% 
                             filter(complete.cases(KeyNeedFulfillment, qualityOverall)) %>%
                             filter(PID %in% medicalOutPltIDs),
                           level = 0
                           )
         )

# Observed (black) vs. predicted (blue) attitude trajectories per person
(
  mdlMedicalOut$Att$predPltSlopesCoreQlt <-
    ggplot(data = mdlMedicalOut$Att$predSlopesCoreQlt, aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

# Export the faceted prediction plot
ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAttCoreQlt.png",
  mdlMedicalOut$Att$predPltSlopesCoreQlt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

The random slopes add significant explained variance to the model. When we consider the influences of core need fulfillment and interaction quality on outgroup attitudes jointly, we find that perceived interaction quality is a substantially stronger predictor and the unique variance explained by core need fulfillment was roughly half of its original effect size.

Compare Fulfillment of Core Need and Allport’s Conditions

To compare the models using either Allport’s conditions or the core need fulfillment to predict outgroup attitudes, we first assess relative model performance indices (i.e., Akaike information criterion, and Bayesian information criterion), and then consider the two predictors in a joint model to see whether the two approaches predict the same variance in outgroup attitudes.

Model fit parameters

For the model fit parameters we look at the AIC, BIC, pseudo R2s, the ICC, RMSE, and Sigma.

# Compare new model to previous step
# NOTE: these two models are not nested, so anova() here reports only
# AIC/BIC/logLik (no likelihood-ratio test columns).
anova(mdlMedicalOut$Att$lmeSlopesAllport, 
      mdlMedicalOut$Att$lmeSlopesCore) %>%
  as.data.frame() %>%
  select(-call) %>%
  kbl(
    .,
    caption = "Medical: Comparison of Allport's Conditions to Core Situational Need",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 49: Medical: Comparison of Allport’s Conditions to Core Situational Need
Model df AIC BIC logLik
mdlMedicalOut\(Att\)lmeSlopesAllport 1 6 12652 12684 -6320
mdlMedicalOut\(Att\)lmeSlopesCore 2 6 12632 12665 -6310
# Information criteria for the two competing random-slopes models
# (lower = better relative fit).
AIC(
  mdlMedicalOut$Att$lmerSlopesAllport,
  mdlMedicalOut$Att$lmerSlopesCore
)
##                                     df   AIC
## mdlMedicalOut$Att$lmerSlopesAllport  6 12652
## mdlMedicalOut$Att$lmerSlopesCore     6 12632
BIC(
  mdlMedicalOut$Att$lmerSlopesAllport,
  mdlMedicalOut$Att$lmerSlopesCore
)
##                                     df   BIC
## mdlMedicalOut$Att$lmerSlopesAllport  6 12684
## mdlMedicalOut$Att$lmerSlopesCore     6 12665
# Side-by-side fit-index table (AIC, BIC, R², ICC, RMSE, Sigma).
performance::compare_performance(
  mdlMedicalOut$Att$lmerSlopesAllport,
  mdlMedicalOut$Att$lmerSlopesCore
)
## # Comparison of Model Performance Indices
## 
## Name    |   Model |       AIC |       BIC | R2 (cond.) | R2 (marg.) |   ICC |  RMSE | Sigma
## -------------------------------------------------------------------------------------------
## Model 1 | lmerMod | 12651.587 | 12684.117 |      0.676 |      0.024 | 0.668 | 9.341 | 9.646
## Model 2 | lmerMod | 12632.018 | 12664.549 |      0.680 |      0.019 | 0.674 | 9.282 | 9.583

When comparing the model selection indices we find that the model using the fulfillment of the situational core need indeed performs slightly better than the model using Allport’s conditions.

Joint model

We then consider the two predictors in a joint model to see whether the two approaches predict the same variance in outgroup attitudes.

random intercept model

We begin by adding both the core need fulfillment and Allport’s conditions to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{56} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \end{split} \end{equation}\]

# Create and save Model
# Joint random-intercept model: core need fulfillment and Allport's
# conditions as simultaneous level-1 predictors.
mdlMedicalOut$Att$lmeInterceptCoreAllport <-
  lme(
    AttitudesDutch ~ KeyNeedFulfillment_cwc + AllportsCondition_cwc,
    random = ~ 1 | PID,
    na.action = na.omit,
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method)
# CONSISTENCY: `center = FALSE` added to match the other summ() calls in
# this section (FALSE is the jtools default, so the output is unchanged).
summ(
  mdlMedicalOut$Att$lmerInterceptCoreAllport <-
    lmer(
      AttitudesDutch ~ KeyNeedFulfillment_cwc + AllportsCondition_cwc + (1 | PID),
      data = medicalOutWithinBetween
    ),
  confint = TRUE,
  digits = 3,
  center = FALSE
)
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12656.923
BIC 12684.032
Pseudo-R² (fixed effects) 0.028
Pseudo-R² (total) 0.663
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.354 65.136 71.572 41.630 69.167 0.000
KeyNeedFulfillment_cwc 0.144 0.102 0.186 6.735 1600.543 0.000
AllportsCondition_cwc 0.141 0.098 0.184 6.473 1600.632 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.479
Residual 9.829
Grouping Variables
Group # groups ICC
PID 70 0.653
# 95% Wald confidence intervals for the joint random-intercept lmer model
mdlMedicalOut$Att$lmerInterceptCoreAllportCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerInterceptCoreAllport)

# Compare new model to previous step (likelihood-ratio tests),
# formatted as an HTML table for the report.
anova(
  mdlMedicalOut$Att$lmeNull, 
  mdlMedicalOut$Att$lmeInterceptAllport,
  mdlMedicalOut$Att$lmeInterceptCoreAllport
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = T,
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 50: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptAllport 2 4 12694 12716 -6343 1 vs 2 82.271 < .001
mdlMedicalOut\(Att\)lmeInterceptCoreAllport 3 5 12657 12684 -6323 2 vs 3 38.904 < .001
# Save variances
# Keep the variance components of the random intercept model for later
# variance-explained comparisons against the slope models.
mdlMedicalOut$Att$varInterceptCoreAllport <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptCoreAllport)

For the random intercept model we find that the fulfillment of situational core needs adds significant explained variance beyond a model that only includes Allport’s conditions. When looking at the regression parameters in the joint model, we additionally find that the fulfillment of core situational needs is a slightly stronger predictor of outgroup attitudes.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{57} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Allport_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# Random slopes for both within-person predictors; "optim" is used because the
# default nlminb optimizer did not converge for this model.
mdlMedicalOut$Att$lmeSlopesCoreAllport <-
  lme(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc + AllportsCondition_cwc,
    random = ~ 1 + KeyNeedFulfillment_cwc + AllportsCondition_cwc | PID,
    na.action = na.omit,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
mdlMedicalOut$Att$lmerSlopesCoreAllport <- lmer(
  AttitudesDutch ~
    KeyNeedFulfillment_cwc + AllportsCondition_cwc +
    (1 + KeyNeedFulfillment_cwc + AllportsCondition_cwc | PID),
  data = medicalOutWithinBetween
)
summ(mdlMedicalOut$Att$lmerSlopesCoreAllport, confint = TRUE, digits = 3)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12624.725
BIC 12678.943
Pseudo-R² (fixed effects) 0.029
Pseudo-R² (total) 0.693
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.331 65.052 71.611 40.837 65.417 0.000
KeyNeedFulfillment_cwc 0.127 0.083 0.171 5.640 305.647 0.000
AllportsCondition_cwc 0.167 0.095 0.239 4.533 10.466 0.001
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.760
PID KeyNeedFulfillment_cwc 0.050
PID AllportsCondition_cwc 0.209
Residual 9.528
Grouping Variables
Group # groups ICC
PID 70 0.676
# 95%CI (Wald) for the lme4 fit
mdlMedicalOut$Att$lmerSlopesCoreAllportCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesCoreAllport)

# Compare new model to previous step via likelihood-ratio tests
anova(
  mdlMedicalOut$Att$lmeNull,
  mdlMedicalOut$Att$lmeInterceptCoreAllport,
  mdlMedicalOut$Att$lmeSlopesCoreAllport
) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 51: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeNull 1 3 12774 12790 -6384
mdlMedicalOut\(Att\)lmeInterceptCoreAllport 2 5 12657 12684 -6323 1 vs 2 121.175 < .001
mdlMedicalOut\(Att\)lmeSlopesCoreAllport 3 10 12595 12649 -6287 2 vs 3 72.066 < .001
# Save variances
mdlMedicalOut$Att$varSlopesCoreAllport <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesCoreAllport)

# Assumption Checks: residual and random-effect diagnostics
mdlMedicalOut$Att$diagSlopesCoreAllport <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesCoreAllport, type = "diag")
# Bug fix: the diagnostics arranged here previously referenced the earlier
# "CoreQlt" model's plots; arrange the "CoreAllport" diagnostics created above.
grid.arrange(
  mdlMedicalOut$Att$diagSlopesCoreAllport[[1]],
  mdlMedicalOut$Att$diagSlopesCoreAllport[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesCoreAllport[[3]],
  mdlMedicalOut$Att$diagSlopesCoreAllport[[4]]
)

# Plot prediction model: fixed-effects predictions (re.form = NA) for the
# subset of participants selected for plotting.
mdlMedicalOut$Att$predSlopesCoreAllport <- 
  medicalOutWithinBetween %>% 
  filter(complete.cases(KeyNeedFulfillment, AllportsCondition)) %>%
  filter(PID %in% medicalOutPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesCoreAllport,
                           medicalOutWithinBetween %>% 
                             filter(complete.cases(KeyNeedFulfillment, AllportsCondition)) %>%
                             filter(PID %in% medicalOutPltIDs),
                           re.form = NA
                           )
         )

# Bug fix: the ggplot previously used the earlier "CoreQlt" prediction object;
# plot the "CoreAllport" predictions created directly above instead.
(
  mdlMedicalOut$Att$predPltSlopesCoreAllport <-
    ggplot(data = mdlMedicalOut$Att$predSlopesCoreAllport, aes(x = TIDnum, y = measure)) +
    geom_line(alpha = 1, color = "blue") +     # model-predicted trajectory
    geom_line(aes(y = AttitudesDutch), alpha = 1) +  # observed trajectory
    facet_wrap( ~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/MedicalOut_PredictionPlot_SlopesAttCoreAllport.png",
  mdlMedicalOut$Att$predPltSlopesCoreAllport,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

Again, the random slopes add significantly to the model. When considering the predictors jointly in the random slopes model, we find that both significantly predict outgroup attitudes with similar sized regression parameters. This indicates that, although both Allport’s conditions and the core need fulfillment seem to (in part) work through perceived interaction quality, they explain different aspects of the variance in outgroup attitudes and do not constitute one another.

Robustness

To build further confidence in our results, we assess a few additional models that might offer alternative explanations of the effects we find.

Interaction Type

To make certain that the effect of core need fulfillment is specific to the interaction, we compare the effect to the fulfillment of the situational core need when no outgroup interaction took place.

random intercept

Here we go back to the full dataset and add generalized situational core need fulfillment (either during an interaction or about the daytime in general) and whether an outgroup interaction happened as well as their interaction term to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{58} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \end{split} \end{equation}\]

# Create and save empty (intercept-only) models for comparison.
# The data are restricted to rows with a non-missing OutgroupInteraction so the
# null models are fit on the same sample as the conditional models below.
mdlMedical$lmerAttNullInt <-
  lme4::lmer(AttitudesDutch ~ 1 + (1 | PID),
             data = dtMedical$full %>%
               filter(complete.cases(
                 OutgroupInteraction
               ))) # use optim if it does not converge

mdlMedical$lmeAttNullInt <-
  lme(
    AttitudesDutch ~ 1,
    random = ~ 1 | PID,
    data = dtMedical$full %>%
      filter(complete.cases(
        OutgroupInteraction
      )),
    na.action = na.omit,
    # Bug fix: optimizer was misspelled "nlmimb"; nlme's optimizer option
    # is "nlminb" (the misspelled value silently fails to select it).
    control = list(opt = "nlminb")
  ) # use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
#summ(mdlMedical$lmerAttNullInt, digits = 3)

# Create and save Model: core need fulfillment x outgroup interaction.
# NOTE(review): fit on the unfiltered data; na.omit should drop the same rows
# the null model's complete.cases filter removed (both report 3909 obs) --
# confirm the estimation samples match.
mdlMedical$lmeInterceptAttCoreInt <-
  lme(
    AttitudesDutch ~ KeyNeedFulfillment_cwc * OutgroupInteraction,
    random =  ~ 1 | PID,
    na.action = na.omit,
    data = dtMedical$full
  )

# Get summary with p-values (Satterthwaite's method)
summ(
  mdlMedical$lmerInterceptAttCoreInt <- lmer(
    AttitudesDutch ~ KeyNeedFulfillment_cwc * OutgroupInteraction + (1 | PID),
    data = dtMedical$full
  ),
  confint = TRUE,
  digits = 3
)
Observations 3909
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 29240.674
BIC 29278.300
Pseudo-R² (fixed effects) 0.031
Pseudo-R² (total) 0.688
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 62.491 59.174 65.809 36.918 71.243 0.000
KeyNeedFulfillment_cwc 0.013 -0.009 0.035 1.192 3836.427 0.233
OutgroupInteractionYes 5.034 4.356 5.713 14.534 3847.446 0.000
KeyNeedFulfillment_cwc:OutgroupInteractionYes 0.174 0.131 0.218 7.837 3839.091 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 14.132
Residual 9.742
Grouping Variables
Group # groups ICC
PID 71 0.678
# 95%CI (Wald) for the lme4 fit
mdlMedical$lmerInterceptAttCoreIntCI <- 
  confint(method = "Wald", mdlMedical$lmerInterceptAttCoreInt)

# Compare new model to previous step via likelihood-ratio test
anova(mdlMedical$lmeAttNullInt, 
      mdlMedical$lmeInterceptAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # add_rownames() is deprecated; rownames_to_column() is its replacement.
  tibble::rownames_to_column(var = "Description") %>%
  # Strip the "mdlMedical$" prefix, keeping only the model name.
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    align = c("l", rep("c", ncol(.)-1)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 52: Medical: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNullInt 1 3 29548 29566 -14771
lmeInterceptAttCoreInt 2 6 29241 29278 -14614 1 vs 2 312.833 < .001
# Save variances
# Variance components of the random intercept interaction model.
mdlMedical$varInterceptAttCoreInt <- 
  lme4::VarCorr(mdlMedical$lmeInterceptAttCoreInt)

We find that the model explains significantly more variance than the empty null model. However, more interestingly, looking at the coefficients, we find that, as seen earlier, having an outgroup interaction has a strong effect on outgroup attitudes. Importantly, we find that there is no main effect of key need fulfillment by itself but the effect is qualified by a significant interaction effect of core need fulfillment and outgroup contact. This indicates that it is not simply key need fulfillment in general — but especially key need fulfillment during an outgroup contact that predicts more positive outgroup attitudes.

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{59} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}OutgroupInteraction_{ti} + \\ &\ \beta_{3i}KeyNeedFulfillXOutgroupInteraction_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence).
# Random slopes for the main effects only; the cross-level interaction term
# keeps a fixed slope.
mdlMedical$lmeSlopesAttCoreInt <- lme(
  AttitudesDutch ~ KeyNeedFulfillment_cwc * OutgroupInteraction,
  random = ~ 1 + KeyNeedFulfillment_cwc + OutgroupInteraction | PID,
  control = lmeControl(opt = "optim"),
  na.action = na.omit,
  data = dtMedical$full
)

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
mdlMedical$lmerSlopesAttCoreInt <- lmer(
  AttitudesDutch ~ KeyNeedFulfillment_cwc * OutgroupInteraction +
    (1 + KeyNeedFulfillment_cwc + OutgroupInteraction | PID),
  data = dtMedical$full
)
summ(mdlMedical$lmerSlopesAttCoreInt, confint = TRUE, digits = 3)
Observations 3909
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 28973.729
BIC 29042.710
Pseudo-R² (fixed effects) 0.036
Pseudo-R² (total) 0.722
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 62.465 58.968 65.963 35.009 69.684 0.000
KeyNeedFulfillment_cwc 0.026 -0.008 0.059 1.512 68.479 0.135
OutgroupInteractionYes 5.409 3.743 7.075 6.363 70.049 0.000
KeyNeedFulfillment_cwc:OutgroupInteractionYes 0.167 0.122 0.211 7.317 2776.320 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 14.899
PID KeyNeedFulfillment_cwc 0.095
PID OutgroupInteractionYes 6.405
Residual 9.185
Grouping Variables
Group # groups ICC
PID 71 0.725
# 95%CI (Wald) for the lme4 fit
mdlMedical$lmerSlopesAttCoreIntCI <- 
  confint(method = "Wald", mdlMedical$lmerSlopesAttCoreInt)

# Compare new model to previous step via likelihood-ratio tests
anova(mdlMedical$lmeAttNullInt, 
      mdlMedical$lmeInterceptAttCoreInt,
      mdlMedical$lmeSlopesAttCoreInt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  # add_rownames() is deprecated; rownames_to_column() is its replacement.
  tibble::rownames_to_column(var = "Description") %>%
  mutate(Description = gsub(".*\\$", "", Description)) %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    # Left-align the Description column, consistent with the matching
    # comparison table produced earlier (Table 52).
    align = c("l", rep("c", ncol(.)-1)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 53: Medical: Model Comparison
Description Model df AIC BIC logLik Test L.Ratio p-value
lmeAttNullInt 1 3 29548 29566 -14771
lmeInterceptAttCoreInt 2 6 29241 29278 -14614 1 vs 2 312.833 < .001
lmeSlopesAttCoreInt 3 11 28974 29043 -14476 2 vs 3 276.946 < .001
# Save variances
mdlMedical$varSlopesAttCoreInt <- 
  lme4::VarCorr(mdlMedical$lmeSlopesAttCoreInt)

# Assumption Checks: residual and random-effect diagnostics for the
# random slopes interaction model (arranged into one figure).
mdlMedical$diagSlopesAttCoreInt <-
  sjPlot::plot_model(mdlMedical$lmerSlopesAttCoreInt, type = "diag")
grid.arrange(
  mdlMedical$diagSlopesAttCoreInt[[1]],
  mdlMedical$diagSlopesAttCoreInt[[2]]$`PID`,
  mdlMedical$diagSlopesAttCoreInt[[3]],
  mdlMedical$diagSlopesAttCoreInt[[4]]
)

# Plot prediction model: fixed-effects predictions (re.form = NA) for the
# participants selected for plotting; observed and predicted trajectories
# are drawn per participant.
mdlMedical$predSlopesAttCoreInt <- 
  dtMedical$full %>%
  filter(complete.cases(KeyNeedFulfillment, OutgroupInteraction)) %>%
  filter(PID %in% medicalPltIDs) %>%
  select(AttitudesDutch, TIDnum, PID) %>% 
  mutate(measure = predict(mdlMedical$lmeSlopesAttCoreInt,
                           dtMedical$full %>% 
                             filter(complete.cases(KeyNeedFulfillment, OutgroupInteraction)) %>%
                             filter(PID %in% medicalPltIDs),
                           re.form = NA
                           )
         )

(
  mdlMedical$predPltSlopesAttCoreInt <-
    ggplot(data = mdlMedical$predSlopesAttCoreInt, aes(x = TIDnum, y = measure)) +
    # blue = model-predicted values, black = observed attitudes
    geom_line(alpha = 1, color = "blue") +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    xlab("Time") +
    ylab("Outgroup Attitudes") +
    theme_Publication()
)

ggsave(
  filename = "Figures/Medical_PredictionPlot_SlopesAttCoreInt.png",
  mdlMedical$predPltSlopesAttCoreInt,
  width = 18,
  height = 12,
  dpi = 800,
  units = "cm",
  device = "png"
)

We find that adding the random slopes adds significantly beyond the random intercept model. We also see that, when taking the possibility of varying slopes into account, the interpretation of the coefficients remains consistent (i.e., outgroup contact and its interaction with core need fulfillment remain important predictors of positive outgroup attitudes).

Plot Interaction

Before we move on, we briefly illustrate the interaction effect, that is, how the effect of core need fulfillment differed depending on whether an outgroup contact took place. To this end we show (1) the raw data points (without taking the nested nature into account), as well as (2) a plot of the model-predicted values and their prediction interval (taking the nested structure into account; similar to an interaction plot).

# visualize interaction
## Without ML structure: naive per-group regression lines over the raw data,
## ignoring the nesting of observations within participants.
ggplot(data = dtMedical$full,
       aes(x = KeyNeedFulfillment,
           y = AttitudesDutch,
           fill = OutgroupInteraction)) +
  #geom_point()+
  geom_smooth(method = 'lm',
              aes(linetype = OutgroupInteraction),
              color = "black") +
  #facet_wrap(~PID, ncol = 6)+
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  #scale_colour_manual(values=c("grey20", "black"), name="Intergroup Contact")+
  scale_y_continuous(
    limits = c(50, 100),
    breaks = seq(50, 100, by = 10),
    position = "left"
  ) +
  scale_x_continuous(limits = c(0, 100), breaks = seq(0, 100, by = 10)) +
  labs(
    # Typo fix in displayed title: "stucture" -> "structure"
    title = "Without ML structure",
    x = "Fulfillment Core Need",
    y = "Outgroup Attitudes",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact"
  ) +
  theme_Publication() +
  theme(legend.position = "bottom",
        legend.key.size = unit(1, "cm"))

## With ML structure
# Create a prediction grid: KeyNeedFulfillment_cwc in steps of 5 over its
# observed (rounded) range, crossed with OutgroupInteraction = No/Yes.
# PID = 0 is a placeholder id (population-level predictions).
datNew <- data.frame(  # use <- for assignment, not =
  KeyNeedFulfillment_cwc = rep(seq(
    round_any(min(
      dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
    ), 5, f = floor), round_any(max(
      dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
    ), 5, f = ceiling), 5
  ), 2),
  PID = 0
) %>%
  mutate(OutgroupInteraction = rep(c("No", "Yes"), each = nrow(.)/2)) %>%
  select(PID, OutgroupInteraction, KeyNeedFulfillment_cwc)


# Predict values, clean up and calculate SE.
# predictInterval() simulates a 95% prediction interval from the fitted
# lme4 model; residual variance is excluded (fixed + random uncertainty only).
PI <-
  merTools::predictInterval(
    merMod = mdlMedical$lmerSlopesAttCoreInt,
    newdata = datNew,
    level = 0.95,
    stat = "mean",
    type = "linear.prediction",
    include.resid.var = FALSE,       # FALSE, not F (F is reassignable)
    fix.intercept.variance = FALSE
  )
mdlMedical$predInterceptAttCoreIntX <- 
  cbind(datNew, PI)
# Back out an approximate SE from the 95% upper bound (half-width / 1.96).
mdlMedical$predInterceptAttCoreIntX$se <-
  (mdlMedical$predInterceptAttCoreIntX$upr - mdlMedical$predInterceptAttCoreIntX$fit) / 1.96
rm(datNew, PI)
# Factor with explicit level order/labels for plotting.
mdlMedical$predInterceptAttCoreIntX$OutgroupInteractionLab <-
  factor(
    x = mdlMedical$predInterceptAttCoreIntX$OutgroupInteraction,
    levels = sort(
      unique(mdlMedical$predInterceptAttCoreIntX$OutgroupInteraction)
    ),
    labels = c("No", "Yes")
  )


# Plot predicted values with SE ribbon
ggplot(
  mdlMedical$predInterceptAttCoreIntX,
  aes(x = KeyNeedFulfillment_cwc,
      y = fit,
      fill = OutgroupInteractionLab)
)+
  #geom_point() +
  geom_line(aes(linetype = OutgroupInteractionLab), size = 1) +
  #facet_wrap(~PID, ncol = 6)+
  geom_ribbon(data = mdlMedical$predInterceptAttCoreIntX,
              aes(ymin = fit - se, ymax = fit + se),
              alpha = 0.3) +
  # Bug fix: the x-axis breaks were computed from the *student* sample
  # ("studentWithinBetween$KeyNeedFullfillment_cwc", also misspelled); use the
  # medical data this plot is based on, with na.omit so min/max are defined.
  scale_x_continuous(breaks = seq(
    round_any(min(
      dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
    ), 5, f = floor), round_any(max(
      dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
    ), 5, f = ceiling), 10
  )) +
  scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
  scale_linetype_manual(values = c("dashed", "solid")) +
  scale_fill_manual(values = c("darkgrey", "black")) +
  labs(
    x = "Fulfillment Core Need",
    y = "Outgroup Attitude (NL)",
    fill = "Intergroup Contact",
    linetype = "Intergroup Contact",
    title = "Based on Model Predictions"
  ) +
  theme_Publication()

# #### Bayesian estimation !! ONLY RUN ON FINAL RENDER !! Takes forever ####
# options(mc.cores = parallel::detectCores())  # Run many chains simultaneously
# brmfit <- brm(
#   AttitudesDutch ~ KeyNeedFulfillment_cwc * OutgroupInteraction +
#     (1 + KeyNeedFulfillment_cwc + OutgroupInteraction | PID),
#   data = dtMedical$full,
#   family = gaussian,
#   iter = 1000,
#   chains = 4
# )
# 
# # create parameters for prediction:
# datNew = data.frame(
#   KeyNeedFulfillment_cwc = rep(seq(
#     round_any(min(
#       dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
#     ), 2, f = floor), round_any(max(
#       dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
#     ), 2, f = ceiling), 2
#   ), 2)
# ) %>%
#   mutate(OutgroupInteraction = rep(c("No", "Yes"), each = nrow(.)/2))
# 
# # Save predicted values and adjust names and labels
# fitavg <-
#   cbind(datNew,
#         fitted(brmfit, newdata = datNew, re_formula = NA)[, -2])
# names(fitavg)[names(fitavg) == "Estimate"] = "pred"
# fitavg$se <- (fitavg$Q97.5 - fitavg$pred) / 1.96
# fitavg$OutgroupInteractionLab <-
#   factor(
#     x = fitavg$OutgroupInteraction,
#     levels = sort(
#       unique(fitavg$OutgroupInteraction)
#     ),
#     labels = c("No", "Yes")
#   )
# 
# # Plot Bayesian SE prediction interval
# ggplot(fitavg,
#        aes(x = KeyNeedFulfillment_cwc,
#            y = pred,
#            fill = OutgroupInteractionLab)) +
#   scale_x_continuous(breaks = seq(
#     round_any(min(
#       dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
#     ), 5, f = floor), round_any(max(
#       dtMedical$full$KeyNeedFulfillment_cwc %>% na.omit
#     ), 5, f = ceiling), 10
#   )) +
#   scale_y_continuous(limits = c(50, 100), breaks = seq(50, 100, 10)) +
#   geom_line(aes(linetype = OutgroupInteractionLab), size = 1) +
#   geom_ribbon(aes(ymin = pred - se, ymax = pred + se), alpha = 0.2) +
#   scale_linetype_manual(values = c("dashed", "solid")) +
#   scale_fill_manual(values = c("darkgrey", "black")) +
#   labs(
#     x = "Fulfillment Core Need",
#     y = "Outgroup Attitude (NL)",
#     fill = "Intergroup Contact",
#     linetype = "Intergroup Contact",
#     title = "Based on Bayesian Prediction Interval"
#   ) +
#   theme_Publication()
# 
# # # plot all overlayed posteriors:
# # pst <- posterior_samples(brmfit, "b")
# # ggplot(dtMedical$full,
# #        aes(x = KeyNeedFulfillment_cwc, y = AttitudesDutch)) +
# #   geom_point(shape = 4, alpha = .1) +
# #   geom_tile() +
# #   geom_abline(
# #     data = pst,
# #     aes(intercept = b_Intercept, slope = b_KeyNeedFulfillment_cwc),
# #     alpha = .025,
# #     size = .4
# #   ) +
# #   labs(title = "slope Posteriors",
# #        x = "Fulfillment Core Need",
# #        y = "Outgroup Attitudes (NL)") +
# #   theme_Publication()
# # rm(datNew, brmfit, fitavg, pst)

The plots indicate that especially once we take the nested data structure into account we can see a substantially stronger effect of core need fulfillment on outgroup attitudes during outgroup contacts than without outgroup contacts.

Other psychological needs

In a final step we check whether during the interaction the core situational need is a meaningful predictor even when taking other fundamental psychological needs into account. We focus on the three commonly considered self determination needs: competence, autonomy, and relatedness.

random intercept with level one predictors

We add the core need fulfillment with the three self determination needs to a random intercept model of outgroup attitudes.

\[\begin{equation} \tag{60} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} \\ &\ \beta_{2i} = \gamma_{20} \\ &\ \beta_{3i} = \gamma_{30} \\ &\ \beta_{4i} = \gamma_{40} \end{split} \end{equation}\]

# Fit and store the random intercept model (nlme): core need plus the three
# self-determination needs as within-person predictors.
mdlMedicalOut$Att$lmeInterceptCoreSdt <-
  lme(
    AttitudesDutch ~ KeyNeedFulfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc,
    random = ~ 1 | PID,
    data = medicalOutWithinBetween,
    na.action = na.exclude
  )

# Refit in lme4 and print the summary (Satterthwaite p-values)
mdlMedicalOut$Att$lmerInterceptCoreSdt <- lmer(
  AttitudesDutch ~ KeyNeedFulfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc + (1 | PID),
  data = medicalOutWithinBetween
)
summ(mdlMedicalOut$Att$lmerInterceptCoreSdt, confint = TRUE, digits = 3)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12644.951
BIC 12682.904
Pseudo-R² (fixed effects) 0.034
Pseudo-R² (total) 0.669
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.333 65.114 71.553 41.602 69.153 0.000
KeyNeedFulfillment_cwc 0.152 0.112 0.192 7.424 1598.494 0.000
Competence_cwc 0.051 0.018 0.084 3.001 1598.494 0.003
Autonomy_cwc 0.040 -0.000 0.080 1.951 1598.488 0.051
Relatedness_cwc 0.053 0.031 0.076 4.612 1598.494 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.489
Residual 9.743
Grouping Variables
Group # groups ICC
PID 70 0.657
# Baseline for comparison: random intercept model with only the
# self-determination theory needs (no core need).
mdlMedicalOut$Att$lmeInterceptSdt <-
  lme(
    AttitudesDutch ~ Competence_cwc + Autonomy_cwc + Relatedness_cwc,
    random = ~ 1 | PID,
    data = medicalOutWithinBetween,
    na.action = na.exclude
  )

# Refit in lme4 and print the summary (Satterthwaite p-values)
mdlMedicalOut$Att$lmerInterceptSdt <- lmer(
  AttitudesDutch ~ Competence_cwc + Autonomy_cwc + Relatedness_cwc + (1 | PID),
  data = medicalOutWithinBetween
)
summ(mdlMedicalOut$Att$lmerInterceptSdt, confint = TRUE, digits = 3)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12691.228
BIC 12723.758
Pseudo-R² (fixed effects) 0.023
Pseudo-R² (total) 0.657
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.338 65.118 71.558 41.597 69.159 0.000
Competence_cwc 0.064 0.031 0.097 3.753 1599.516 0.000
Autonomy_cwc 0.064 0.023 0.104 3.089 1599.512 0.002
Relatedness_cwc 0.055 0.032 0.078 4.686 1599.518 0.000
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.483
Residual 9.907
Grouping Variables
Group # groups ICC
PID 70 0.649
# Compare new model to previous steps via likelihood-ratio test
anova(
  mdlMedicalOut$Att$lmeInterceptSdt,
  mdlMedicalOut$Att$lmeInterceptCoreSdt
  ) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 54: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeInterceptSdt 1 6 12691 12724 -6340
mdlMedicalOut\(Att\)lmeInterceptCoreSdt 2 7 12645 12683 -6315 1 vs 2 48.277 < .001
# Save variances
# Variance components of the random intercept core + SDT model.
mdlMedicalOut$Att$varInterceptCoreSdt <-
  lme4::VarCorr(mdlMedicalOut$Att$lmeInterceptCoreSdt)

We find that the model with the added predictor indeed explains more variance in outgroup attitudes than the unconditional model, and we find that adding the core need adds further explained variance — beyond the self determination needs. Looking at the individual coefficients, we find that the situational core need relates significantly to outgroup attitudes, that it is a stronger predictor than any of the self determination theory needs, and that it absorbs some of the variance explained by the self determination theory needs (when compared to a model without the core need).

random slope

We again check whether further letting the effects vary between participants explains additional variance in outgroup attitudes (i.e., random slope).

\[\begin{equation} \tag{61} \begin{split} \textrm{Level 1:} & \begin{aligned}[t] \ Attitude_{ti} = &\ \beta_{0i} + \beta_{1i}KeyNeedFulfill_{ti} + \\ &\ \beta_{2i}Autonomy_{ti} + \\ &\ \beta_{3i}Competence_{ti} + \\ &\ \beta_{4i}Relatedness_{ti} + e_{ti} \end{aligned} \\ \textrm{Level 2:} &\ \beta_{0i} = \gamma_{00} + u_{0i} \\ &\ \beta_{1i} = \gamma_{10} + u_{1i} \\ &\ \beta_{2i} = \gamma_{20} + u_{2i} \\ &\ \beta_{3i} = \gamma_{30} + u_{3i} \\ &\ \beta_{4i} = \gamma_{40} + u_{4i} \end{split} \end{equation}\]

# Create and save Model (optimizer needed to reach convergence)
# NOTE(review): the nlme random part omits Autonomy_cwc while the lme4 fit
# below includes it -- presumably dropped for convergence; confirm intended.
mdlMedicalOut$Att$lmeSlopesCoreSdt <-
  lme(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc,
    random = ~ 1 + KeyNeedFulfillment_cwc + Competence_cwc + Relatedness_cwc | PID,
    control = lmeControl(opt = "optim", maxIter = 100, msMaxIter = 100),
    data = medicalOutWithinBetween,
    na.action = na.exclude
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlMedicalOut$Att$lmerSlopesCoreSdt <- lmer(
    AttitudesDutch ~
      KeyNeedFulfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc +
      (1 + KeyNeedFulfillment_cwc + Competence_cwc + Autonomy_cwc + Relatedness_cwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12581.272
BIC 12695.129
Pseudo-R² (fixed effects) 0.035
Pseudo-R² (total) 0.710
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.317 65.106 71.529 41.691 69.721 0.000
KeyNeedFulfillment_cwc 0.146 0.074 0.218 3.999 49.060 0.000
Competence_cwc 0.058 0.016 0.101 2.682 29.711 0.012
Autonomy_cwc 0.038 -0.006 0.082 1.692 66.265 0.095
Relatedness_cwc 0.055 0.024 0.086 3.482 47.083 0.001
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.485
PID KeyNeedFulfillment_cwc 0.222
PID Competence_cwc 0.089
PID Autonomy_cwc 0.060
PID Relatedness_cwc 0.078
Residual 9.130
Grouping Variables
Group # groups ICC
PID 70 0.686
# 95%CI (Wald) for the lme4 fit
mdlMedicalOut$Att$lmerSlopesCoreSdtCI <- 
  confint(method = "Wald", mdlMedicalOut$Att$lmerSlopesCoreSdt)

# Compare new model to previous step via likelihood-ratio tests
anova(mdlMedicalOut$Att$lmeInterceptSdt,
      mdlMedicalOut$Att$lmeInterceptCoreSdt, 
      mdlMedicalOut$Att$lmeSlopesCoreSdt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 55: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut\(Att\)lmeInterceptSdt 1 6 12691 12724 -6340
mdlMedicalOut\(Att\)lmeInterceptCoreSdt 2 7 12645 12683 -6315 1 vs 2 48.277 < .001
mdlMedicalOut\(Att\)lmeSlopesCoreSdt 3 16 12576 12663 -6272 2 vs 3 86.881 < .001
# model with SDT only
# Create and save Model (optimizer needed to reach convergence).
# Serves as the random slopes baseline without the core need predictor.
mdlMedicalOut$Att$lmeSlopesSdt <-
  lme(
    AttitudesDutch ~
      Competence_cwc + Autonomy_cwc + Relatedness_cwc,
    random = ~ 1 + Competence_cwc + Autonomy_cwc + Relatedness_cwc | PID,
    control = lmeControl(opt = "optim"),
    data = medicalOutWithinBetween,
    na.action = na.exclude
  )

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
summ(
  mdlMedicalOut$Att$lmerSlopesSdt <- lmer(
    AttitudesDutch ~
      Competence_cwc + Autonomy_cwc + Relatedness_cwc +
      (1 + Competence_cwc + Autonomy_cwc + Relatedness_cwc | PID),
    data = medicalOutWithinBetween
  ),
  confint = TRUE,
  digits = 3
)
Observations 1672
Dependent variable AttitudesDutch
Type Mixed effects linear regression
AIC 12667.058
BIC 12748.385
Pseudo-R² (fixed effects) 0.030
Pseudo-R² (total) 0.623
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 68.353 65.542 71.163 47.665 101.288 0.000
Competence_cwc 0.070 0.025 0.116 3.012 36.992 0.005
Autonomy_cwc 0.060 0.008 0.111 2.280 42.418 0.028
Relatedness_cwc 0.060 0.028 0.092 3.657 48.983 0.001
p values calculated using Satterthwaite d.f.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 11.720
PID Competence_cwc 0.102
PID Autonomy_cwc 0.108
PID Relatedness_cwc 0.082
Residual 9.600
Grouping Variables
Group # groups ICC
PID 70 0.598
# Compare new model to previous step via likelihood-ratio tests
anova(mdlMedicalOut$Att$lmeInterceptSdt,
      mdlMedicalOut$Att$lmeSlopesSdt, 
      mdlMedicalOut$Att$lmeSlopesCoreSdt) %>%
  as.data.frame() %>%
  select(-call) %>%
  mutate(
    L.Ratio = round(L.Ratio, 3),
    # Report tiny p-values as "< .001" rather than a rounded 0
    `p-value` = ifelse(`p-value`>=.001, round(`p-value`, 3), "< .001")
  ) %>%
  replace(is.na(.), "") %>%
  kbl(
    .,
    caption = "Medical: Model Comparison",
    format = "html",
    linesep = "",
    booktabs = TRUE,  # TRUE, not T (T is a reassignable binding)
    align = rep("c", ncol(.)),
    digits = 3
  ) %>%
  kable_styling(position = "left")
Table 55: Medical: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
mdlMedicalOut$Att$lmeInterceptSdt 1 6 12691 12724 -6340
mdlMedicalOut$Att$lmeSlopesSdt 2 15 12666 12747 -6318 1 vs 2 43.71 < .001
mdlMedicalOut$Att$lmeSlopesCoreSdt 3 16 12576 12663 -6272 2 vs 3 91.447 < .001
# Save variances
# NOTE(review): lme4::VarCorr() is a generic; called on this nlme::lme fit it
# should dispatch to nlme's method — confirm the intended package, since both
# nlme and lme4 are attached here.
mdlMedicalOut$Att$varSlopesCoreSdt <- 
  lme4::VarCorr(mdlMedicalOut$Att$lmeSlopesCoreSdt)

# Assumption Checks:
# plot_model(type = "diag") returns a list of diagnostic ggplots; element [[2]]
# is itself a list with one QQ-plot per grouping factor, hence `$PID` below.
mdlMedicalOut$Att$diagSlopesCoreSdt <- 
  sjPlot::plot_model(mdlMedicalOut$Att$lmerSlopesCoreSdt, type = "diag")
# Arrange the four diagnostics in a 2x2 grid (presumably residual QQ,
# random-effect QQ for PID, residual density, and homoscedasticity — the
# ordering is determined by sjPlot; verify if the package version changes).
grid.arrange(
  mdlMedicalOut$Att$diagSlopesCoreSdt[[1]],
  mdlMedicalOut$Att$diagSlopesCoreSdt[[2]]$`PID`,
  mdlMedicalOut$Att$diagSlopesCoreSdt[[3]],
  mdlMedicalOut$Att$diagSlopesCoreSdt[[4]]
)

# Plot prediction data.
# The original built the identical filtered data set twice — once for the
# observed values and once again inside predict(). Build it once inside
# local() (so no extra object leaks into the global environment, which is
# later swept with ls(pattern = "medical")) and reuse it for both.
mdlMedicalOut$Att$predSlopesCoreSdt <- local({
  # complete cases on all predictors, restricted to the participants
  # selected for plotting
  pltDat <- medicalOutWithinBetween %>%
    filter(complete.cases(
      KeyNeedFulfillment,
      Autonomy, Competence, Relatedness
    )) %>%
    filter(PID %in% medicalOutPltIDs)

  pltDat %>%
    select(AttitudesDutch, TIDnum, PID, Autonomy, Competence, Relatedness) %>%
    # NOTE(review): predict.lme() has no `re.form` argument (that is lme4's
    # interface); it is silently swallowed by `...`, so these predictions use
    # nlme's default `level` (subject-specific). If population-level
    # predictions were intended, use `level = 0` instead — confirm before
    # changing, as it would alter the plotted blue lines.
    mutate(measure = predict(mdlMedicalOut$Att$lmeSlopesCoreSdt,
                             pltDat,
                             re.form = NA))
})

# Per-participant panels of outgroup attitudes over time: the model prediction
# is drawn first (blue), the observed trajectory on top (black). Wrapping the
# assignment in parentheses prints the plot while saving it.
(
  mdlMedicalOut$Att$predPltSlopesCoreSdt <-
    ggplot(mdlMedicalOut$Att$predSlopesCoreSdt,
           aes(x = TIDnum, y = measure)) +
    geom_line(color = "blue", alpha = 1) +
    geom_line(aes(y = AttitudesDutch), alpha = 1) +
    facet_wrap(~ PID, ncol = 6) +
    labs(x = "Time", y = "Outgroup Attitudes") +
    theme_Publication()
)

# Export the prediction plot as a high-resolution PNG for the manuscript.
ggsave(
  "Figures/MedicalOut_PredictionPlot_SlopesAttCoreStd.png",
  plot = mdlMedicalOut$Att$predPltSlopesCoreSdt,
  device = "png",
  units = "cm",
  width = 18,
  height = 12,
  dpi = 800
)

# Model comparison (lme4 fits): does adding core need fulfillment improve on
# the SDT-needs-only random-slopes model? The pieces below feed the inline
# chi-square report in the text.
medicalSdtComp <-
  anova(
    mdlMedicalOut$Att$lmerSlopesSdt,
    mdlMedicalOut$Att$lmerSlopesCoreSdt
  ) %>%
  as.data.frame()

# degrees of freedom of the likelihood-ratio test
medicalSdtCompDf <- medicalSdtComp["mdlMedicalOut$Att$lmerSlopesCoreSdt", "Df"]
# number of groups per grouping factor (only PID here);
# vapply is type-safe where sapply's return type depends on its input
medicalSdtCompN <- vapply(ranef(mdlMedicalOut$Att$lmerSlopesCoreSdt), nrow, integer(1))
medicalSdtCompChi <- medicalSdtComp["mdlMedicalOut$Att$lmerSlopesCoreSdt", "Chisq"] %>% round(2) %>% format(nsmall = 2)
# render tiny p-values as "< .001", otherwise "= .xxx"
medicalSdtCompP <-
  ifelse(medicalSdtComp["mdlMedicalOut$Att$lmerSlopesCoreSdt", "Pr(>Chisq)"] < .001,
         "< .001",
         paste0("= ", medicalSdtComp["mdlMedicalOut$Att$lmerSlopesCoreSdt", "Pr(>Chisq)"] %>% round(3) %>% format(nsmall = 3)))

# Assemble the formatted test statistic for inline reporting.
# Fixed: "$chi^2$" typesets the italic letters "chi" in math mode; the escaped
# "\\chi" is needed to produce the Greek letter chi.
medicalSdtCompChiSq <- paste0("$\\chi^2$(", medicalSdtCompDf, ", <i>N</i> = ", medicalSdtCompN, ") = ", medicalSdtCompChi, ", <i>p</i> ", medicalSdtCompP)

When compared to the model with only the SDT needs, core need fulfillment flexibly takes on some of the explained variance of all three fundamental needs. However, similar to the previous study, in this large sample relatedness, competence, and autonomy each independently predicted positive outgroup attitudes. Notably, the regression coefficient for core need fulfillment is three times as large (with all scaling being equal).

Forest Plots of Main Analyses

Contact Hypothesis

Aggregate Data

Summary of regression results from the aggregated contact and interaction quality data.

Multilevel Analysis

Summary of mixed models results of the contemporaneous contact effects.

Core Need Fulfillment

Core Need Fulfillment predicting Interaction Quality.

Core Need Fulfillment predicting Outgroup Attitudes.

Core Need Fulfillment and Interaction Quality predicting Outgroup Attitudes.

Robustness Analyses

Need Fulfillment and Intergroup Contact predicting Outgroup Attitudes (full sample).

Core Need Fulfillment predicting Outgroup Attitudes, while controlling for self-determination theory needs (intergroup contact sample).

Export for Methods and Results

We also export all relevant data for the Methods and Results section, which are written in a separate RMarkdown file and are linked to the full LaTeX manuscript file in Overleaf.

# collect supplementary data files
# Worker sample
dtWorkerSupp <- lapply(ls(pattern = "worker"), get)
names(dtWorkerSupp) <- ls(pattern = "worker")

# Student sample
dtStudentSupp <- lapply(ls(pattern = "student"), get)
names(dtStudentSupp) <- ls(pattern = "student")

# Medical sample
dtMedicalSupp <- lapply(ls(pattern = "medical"), get)
names(dtMedicalSupp) <- ls(pattern = "medical")

# collect and export all data files
save(list = ls(pattern = "dt"), file = "data/wrangled.RData")

Software Information

The full session information with all relevant system information and all loaded and installed packages is available in the collapsible section below.

System Info
Table 56: R environment session info for reproducibility of results
Setting Value
version R version 4.1.1 (2021-08-10)
os macOS Big Sur 10.16
system x86_64, darwin17.0
ui X11
language (EN)
collate en_US.UTF-8
ctype en_US.UTF-8
tz Europe/Amsterdam
date 2022-03-02

Package Info
Table 57: Package info for reproducibility of results
Package Loaded version Date Source
bookdown 0.24 2021-09-02 CRAN (R 4.1.0)
brms 2.16.1 2021-08-23 CRAN (R 4.1.0)
data.table 1.14.0 2021-02-21 CRAN (R 4.1.0)
devtools 2.4.2 2021-06-07 CRAN (R 4.1.0)
dplyr 1.0.7 2021-06-18 CRAN (R 4.1.0)
ellipse 0.4.2 2020-05-27 CRAN (R 4.1.0)
Formula 1.2-4 2020-10-16 CRAN (R 4.1.0)
ggpattern 0.2.0 2021-10-11 Github ()
ggplot2 3.3.5 2021-06-25 CRAN (R 4.1.0)
ggthemes 4.2.4 2021-01-20 CRAN (R 4.1.0)
gridExtra 2.3 2017-09-09 CRAN (R 4.1.0)
gtsummary 1.4.2 2021-07-13 CRAN (R 4.1.0)
haven 2.4.3 2021-08-04 CRAN (R 4.1.0)
Hmisc 4.5-0 2021-02-28 CRAN (R 4.1.0)
jtools 2.1.4 2021-09-03 CRAN (R 4.1.0)
kableExtra 1.3.4 2021-02-20 CRAN (R 4.1.0)
knitr 1.36 2021-09-29 CRAN (R 4.1.0)
lattice 0.20-44 2021-05-02 CRAN (R 4.1.1)
lme4 1.1-27.1 2021-06-22 CRAN (R 4.1.0)
lubridate 1.7.10 2021-02-26 CRAN (R 4.1.0)
mada 0.5.10 2020-05-25 CRAN (R 4.1.0)
Matrix 1.3-4 2021-06-01 CRAN (R 4.1.1)
metafor 3.0-2 2021-06-09 CRAN (R 4.1.0)
mvmeta 1.0.3 2019-12-10 CRAN (R 4.1.0)
mvtnorm 1.1-2 2021-06-07 CRAN (R 4.1.0)
nlme 3.1-152 2021-02-04 CRAN (R 4.1.1)
pander 0.6.4 2021-06-13 CRAN (R 4.1.0)
papaja 0.1.0.9997 2021-10-11 Github ()
plotly 4.9.4.9000 2021-08-28 Github ()
plyr 1.8.6 2020-03-03 CRAN (R 4.1.0)
psych 2.1.9 2021-09-22 CRAN (R 4.1.0)
purrr 0.3.4 2020-04-17 CRAN (R 4.1.0)
RColorBrewer 1.1-2 2014-12-07 CRAN (R 4.1.0)
Rcpp 1.0.7 2021-07-07 CRAN (R 4.1.0)
remedy 0.1.0 2018-12-03 CRAN (R 4.1.0)
reshape2 1.4.4 2020-04-09 CRAN (R 4.1.0)
rmarkdown 2.11 2021-09-14 CRAN (R 4.1.1)
sessioninfo 1.1.1 2018-11-05 CRAN (R 4.1.0)
stringi 1.7.5 2021-10-04 CRAN (R 4.1.0)
stringr 1.4.0 2019-02-10 CRAN (R 4.1.0)
survival 3.2-12 2021-08-13 CRAN (R 4.1.1)
tibble 3.1.5 2021-09-30 CRAN (R 4.1.0)
tidyr 1.1.4 2021-09-27 CRAN (R 4.1.0)
usethis 2.0.1 2021-02-10 CRAN (R 4.1.0)

Full Session Info (including loaded but unattached packages — for troubleshooting only)

R version 4.1.1 (2021-08-10)

Platform: x86_64-apple-darwin17.0 (64-bit)

locale: en_US.UTF-8||en_US.UTF-8||en_US.UTF-8||C||en_US.UTF-8||en_US.UTF-8

attached base packages:

  • grid
  • stats
  • graphics
  • grDevices
  • datasets
  • utils
  • methods
  • base

other attached packages:

  • metafor(v.3.0-2)
  • purrr(v.0.3.4)
  • lubridate(v.1.7.10)
  • reshape2(v.1.4.4)
  • stringi(v.1.7.5)
  • stringr(v.1.4.0)
  • papaja(v.0.1.0.9997)
  • kableExtra(v.1.3.4)
  • Hmisc(v.4.5-0)
  • Formula(v.1.2-4)
  • survival(v.3.2-12)
  • lattice(v.0.20-44)
  • tidyr(v.1.1.4)
  • dplyr(v.1.0.7)
  • plyr(v.1.8.6)
  • data.table(v.1.14.0)
  • mada(v.0.5.10)
  • mvmeta(v.1.0.3)
  • ellipse(v.0.4.2)
  • mvtnorm(v.1.1-2)
  • devtools(v.2.4.2)
  • usethis(v.2.0.1)
  • pander(v.0.6.4)
  • tibble(v.3.1.5)
  • sessioninfo(v.1.1.1)
  • gtsummary(v.1.4.2)
  • jtools(v.2.1.4)
  • nlme(v.3.1-152)
  • lme4(v.1.1-27.1)
  • Matrix(v.1.3-4)
  • ggpattern(v.0.2.0)
  • gridExtra(v.2.3)
  • plotly(v.4.9.4.9000)
  • RColorBrewer(v.1.1-2)
  • haven(v.2.4.3)
  • ggthemes(v.4.2.4)
  • ggplot2(v.3.3.5)
  • psych(v.2.1.9)
  • brms(v.2.16.1)
  • Rcpp(v.1.0.7)
  • bookdown(v.0.24)
  • remedy(v.0.1.0)
  • knitr(v.1.36)
  • rmarkdown(v.2.11)

loaded via a namespace (and not attached):

  • mitml(v.0.4-3)
  • svglite(v.2.0.0)
  • class(v.7.3-19)
  • ps(v.1.6.0)
  • foreach(v.1.5.1)
  • projpred(v.2.0.2)
  • rprojroot(v.2.0.2)
  • crayon(v.1.4.1)
  • V8(v.3.4.2)
  • MASS(v.7.3-54)
  • backports(v.1.2.1)
  • posterior(v.1.1.0)
  • colourpicker(v.1.1.0)
  • rlang(v.0.4.11)
  • performance(v.0.7.3)
  • nloptr(v.1.2.2.2)
  • callr(v.3.7.0)
  • glue(v.1.4.2)
  • loo(v.2.4.1)
  • sjPlot(v.2.8.9)
  • pbkrtest(v.0.5.1)
  • rstan(v.2.21.2)
  • parallel(v.4.1.1)
  • processx(v.3.5.2)
  • tidyselect(v.1.1.1)
  • interactions(v.1.1.5)
  • XML(v.3.99-0.8)
  • zoo(v.1.8-9)
  • sjmisc(v.2.8.7)
  • distributional(v.0.2.2)
  • chron(v.2.3-56)
  • xtable(v.1.8-4)
  • magrittr(v.2.0.1)
  • evaluate(v.0.14)
  • cli(v.3.0.1)
  • rstudioapi(v.0.13)
  • miniUI(v.0.1.1.1)
  • bslib(v.0.3.0)
  • rpart(v.4.1-15)
  • wordcloud(v.2.6)
  • mathjaxr(v.1.4-0)
  • sjlabelled(v.1.1.8)
  • shinystan(v.2.5.0)
  • shiny(v.1.6.0)
  • xfun(v.0.26)
  • tm(v.0.7-8)
  • parameters(v.0.14.0)
  • inline(v.0.3.19)
  • pkgbuild(v.1.2.0)
  • cluster(v.2.1.2)
  • bridgesampling(v.1.1-2)
  • nFactors(v.2.4.1)
  • expm(v.0.999-6)
  • Brobdingnag(v.1.2-6)
  • polycor(v.0.7-10)
  • threejs(v.0.3.3)
  • qdap(v.2.4.3)
  • png(v.0.1-7)
  • withr(v.2.4.2)
  • berryFunctions(v.1.20.1)
  • slam(v.0.1-48)
  • bitops(v.1.0-7)
  • openNLP(v.0.2-7)
  • e1071(v.1.7-9)
  • coda(v.0.19-4)
  • pillar(v.1.6.3)
  • RcppParallel(v.5.1.4)
  • cachem(v.1.0.6)
  • multcomp(v.1.4-18)
  • broom.helpers(v.1.4.0)
  • fs(v.1.5.0)
  • NLP(v.0.2-1)
  • xts(v.0.12.1)
  • vctrs(v.0.3.8)
  • pbivnorm(v.0.6.0)
  • ellipsis(v.0.3.2)
  • generics(v.0.1.0)
  • dygraphs(v.1.1.1.6)
  • Metaan(v.0.1.0)
  • tools(v.4.1.1)
  • foreign(v.0.8-81)
  • munsell(v.0.5.0)
  • gamm4(v.0.2-6)
  • qdapTools(v.1.3.5)
  • emmeans(v.1.6.3)
  • proxy(v.0.4-26)
  • fastmap(v.1.1.0)
  • compiler(v.4.1.1)
  • pkgload(v.1.2.1)
  • abind(v.1.4-5)
  • httpuv(v.1.6.3)
  • gt(v.0.3.1)
  • qdapDictionaries(v.1.0.7)
  • rJava(v.1.0-6)
  • DescTools(v.0.99.43)
  • ltm(v.1.1-1)
  • glmmTMB(v.1.1.2.3)
  • msm(v.1.6.9)
  • utf8(v.1.2.2)
  • later(v.1.3.0)
  • misty(v.0.4.3)
  • pan(v.1.6)
  • jomo(v.2.7-2)
  • jsonlite(v.1.7.2)
  • arm(v.1.11-2)
  • scales(v.1.1.1)
  • gld(v.2.6.2)
  • carData(v.3.0-4)
  • estimability(v.1.3)
  • renv(v.0.14.0)
  • lazyeval(v.0.2.2)
  • promises(v.1.2.0.1)
  • latticeExtra(v.0.6-29)
  • effectsize(v.0.4.5)
  • checkmate(v.2.0.0)
  • openxlsx(v.4.2.4)
  • sandwich(v.3.0-1)
  • blme(v.1.0-5)
  • webshot(v.0.5.2)
  • forcats(v.0.5.1)
  • igraph(v.1.2.6)
  • plotrix(v.3.8-2)
  • numDeriv(v.2016.8-1.1)
  • rsconnect(v.0.8.24)
  • yaml(v.2.2.1)
  • systemfonts(v.1.0.2)
  • qdapRegex(v.0.7.2)
  • bayesplot(v.1.8.1)
  • htmltools(v.0.5.2)
  • rstantools(v.2.1.1)
  • memoise(v.2.0.0)
  • lavaan(v.0.6-9)
  • viridisLite(v.0.4.0)
  • digest(v.0.6.28)
  • assertthat(v.0.2.1)
  • mime(v.0.12)
  • commonmark(v.1.7)
  • bayestestR(v.0.10.5)
  • rockchalk(v.1.8.144)
  • Exact(v.3.0)
  • remotes(v.2.4.0)
  • openNLPdata(v.1.5.3-4)
  • shinythemes(v.1.2.0)
  • splines(v.4.1.1)
  • labeling(v.0.4.2)
  • rematch2(v.2.1.2)
  • r2mlm(v.0.3.0)
  • RCurl(v.1.98-1.4)
  • broom(v.0.7.9.9001)
  • hms(v.1.1.1)
  • modelr(v.0.1.8)
  • colorspace(v.2.0-2)
  • base64enc(v.0.1-3)
  • mnormt(v.2.0.2)
  • tmvnsim(v.1.0-2)
  • broom.mixed(v.0.2.7)
  • nnet(v.7.3-16)
  • sass(v.0.4.0)
  • fansi(v.0.5.0)
  • tzdb(v.0.1.2)
  • R6(v.2.5.1)
  • horst(v.0.1)
  • ggridges(v.0.5.3)
  • lifecycle(v.1.0.1)
  • rootSolve(v.1.8.2.2)
  • r2glmm(v.0.1.2)
  • StanHeaders(v.2.21.0-7)
  • zip(v.2.2.0)
  • datawizard(v.0.2.0)
  • curl(v.4.3.2)
  • minqa(v.1.2.4)
  • testthat(v.3.0.4)
  • jquerylib(v.0.1.4)
  • snakecase(v.0.11.0)
  • broomExtra(v.4.2.3)
  • venneuler(v.1.1-0)
  • desc(v.1.3.0)
  • TH.data(v.1.1-0)
  • iterators(v.1.0.13)
  • TMB(v.1.7.21)
  • Scale(v.1.0.4)
  • htmlwidgets(v.1.5.4)
  • markdown(v.1.1)
  • crosstalk(v.1.1.1)
  • rvest(v.1.0.1)
  • mgcv(v.1.8-36)
  • insight(v.0.14.3)
  • lmom(v.2.8)
  • mixmeta(v.1.1.3)
  • htmlTable(v.2.2.1)
  • tensorA(v.0.36.2)
  • codetools(v.0.2-18)
  • matrixStats(v.0.60.1)
  • gtools(v.3.9.2)
  • prettyunits(v.1.1.1)
  • gtable(v.0.3.0)
  • DBI(v.1.1.1)
  • stats4(v.4.1.1)
  • httr(v.1.4.2)
  • highr(v.0.9)
  • farver(v.2.1.0)
  • DT(v.0.19)
  • xml2(v.1.3.2)
  • gender(v.0.6.0)
  • boot(v.1.3-28)
  • shinyjs(v.2.0.0)
  • ggeffects(v.1.1.1)
  • readr(v.2.0.2)
  • kutils(v.1.70)
  • sjstats(v.0.18.1)
  • jpeg(v.0.1-9)
  • pkgconfig(v.2.0.3)
  • lmerTest(v.3.1-3)
  • merTools(v.0.5.2)


References